code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = abs(lowerCAmelCase__ )
lowercase = 0
while n > 0:
res += n % 10
n //= 10
return res
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = abs(lowerCAmelCase__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return sum(int(lowerCAmelCase__ ) for c in str(abs(lowerCAmelCase__ ) ) )
def UpperCamelCase ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
lowercase = f'{func.__name__}({value})'
lowercase = timeit(f'__main__.{call}' , setup='''import __main__''' )
print(f'{call:56} = {func(lowerCAmelCase__ )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(lowerCAmelCase__ , lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 101 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_UpperCAmelCase : Union[str, Any] ="""pt"""
elif is_tf_available():
_UpperCAmelCase : List[Any] ="""tf"""
else:
_UpperCAmelCase : Optional[int] ="""jax"""
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PerceiverTokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def lowercase_ ( self ) -> Optional[int]:
super().setUp()
lowerCAmelCase_ : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ) -> Any:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def lowercase_ ( self , **__lowercase ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase=False , __lowercase=2_0 , __lowercase=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase_ : Optional[Any] = []
for i in range(len(__lowercase ) ):
try:
lowerCAmelCase_ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase_ : List[str] = list(filter(lambda __lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowercase ) )
lowerCAmelCase_ : Optional[int] = list(filter(lambda __lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowercase ) , __lowercase ) )
if max_length is not None and len(__lowercase ) > max_length:
lowerCAmelCase_ : Union[str, Any] = toks[:max_length]
if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
while len(__lowercase ) < min_length:
lowerCAmelCase_ : Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase_ : List[str] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase_ : int = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
if " " not in output_txt and len(__lowercase ) > 1:
lowerCAmelCase_ : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
)
if with_prefix_space:
lowerCAmelCase_ : Any = ''' ''' + output_txt
lowerCAmelCase_ : List[str] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
return output_txt, output_ids
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : List[str] = self.perceiver_tokenizer
lowerCAmelCase_ : Any = '''Unicode €.'''
lowerCAmelCase_ : Dict = tokenizer(__lowercase )
lowerCAmelCase_ : Any = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
lowerCAmelCase_ : str = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]Unicode €.[SEP]''' )
lowerCAmelCase_ : Optional[int] = tokenizer('''e è é ê ë''' )
lowerCAmelCase_ : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
lowerCAmelCase_ : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
lowerCAmelCase_ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase_ : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCAmelCase_ : Optional[int] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
if FRAMEWORK != "jax":
lowerCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowercase , __lowercase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : int = self.perceiver_tokenizer
lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowercase )
self.assertIn('''attention_mask''' , __lowercase )
self.assertNotIn('''decoder_input_ids''' , __lowercase )
self.assertNotIn('''decoder_attention_mask''' , __lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
lowerCAmelCase_ : int = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase_ : List[str] = tokenizer(
text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def lowercase_ ( self ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
shutil.rmtree(__lowercase )
lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Tuple = json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Any = json.load(__lowercase )
lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase_ : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
__lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Any:
pass
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> List[str]:
pass
def lowercase_ ( self ) -> Dict:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
self.assertIsInstance(__lowercase , __lowercase ) | 262 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='time_series_transformer'
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__(self , a_ = None , a_ = None , a_ = "student_t" , a_ = "nll" , a_ = 1 , a_ = [1, 2, 3, 4, 5, 6, 7] , a_ = "mean" , a_ = 0 , a_ = 0 , a_ = 0 , a_ = 0 , a_ = None , a_ = None , a_ = 32 , a_ = 32 , a_ = 2 , a_ = 2 , a_ = 2 , a_ = 2 , a_ = True , a_ = "gelu" , a_ = 64 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1_00 , a_ = 0.02 , a_=True , **a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = prediction_length
__snake_case : List[str] = context_length or prediction_length
__snake_case : Tuple = distribution_output
__snake_case : Union[str, Any] = loss
__snake_case : Tuple = input_size
__snake_case : Optional[Any] = num_time_features
__snake_case : Tuple = lags_sequence
__snake_case : Optional[int] = scaling
__snake_case : Optional[Any] = num_dynamic_real_features
__snake_case : Optional[Any] = num_static_real_features
__snake_case : List[str] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__snake_case : Dict = cardinality
else:
__snake_case : Optional[int] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__snake_case : Dict = embedding_dimension
else:
__snake_case : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__snake_case : int = num_parallel_samples
# Transformer architecture configuration
__snake_case : Union[str, Any] = input_size * len(a_ ) + self._number_of_features
__snake_case : Any = d_model
__snake_case : Union[str, Any] = encoder_attention_heads
__snake_case : Any = decoder_attention_heads
__snake_case : Any = encoder_ffn_dim
__snake_case : List[Any] = decoder_ffn_dim
__snake_case : Tuple = encoder_layers
__snake_case : Union[str, Any] = decoder_layers
__snake_case : int = dropout
__snake_case : List[str] = attention_dropout
__snake_case : Optional[int] = activation_dropout
__snake_case : Dict = encoder_layerdrop
__snake_case : Optional[Any] = decoder_layerdrop
__snake_case : int = activation_function
__snake_case : Union[str, Any] = init_std
__snake_case : int = use_cache
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 102 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case__:
'''simple docstring'''
@staticmethod
def lowercase_ ( *__lowercase , **__lowercase ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case__( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase_ : str = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def lowercase_ ( self , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : Tuple = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase_ : Dict = len(__lowercase )
self.assertGreater(__lowercase , 0 )
self.assertEqual(
__lowercase , [
{
'''score''': ANY(__lowercase ),
'''label''': ANY(__lowercase ),
'''box''': {'''xmin''': ANY(__lowercase ), '''ymin''': ANY(__lowercase ), '''xmax''': ANY(__lowercase ), '''ymax''': ANY(__lowercase )},
}
for i in range(__lowercase )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def lowercase_ ( self ) -> List[str]:
pass
@require_torch
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Union[str, Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase_ : Union[str, Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
lowerCAmelCase_ : Union[str, Any] = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Any = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
lowerCAmelCase_ : Tuple = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def lowercase_ ( self ) -> List[str]:
pass
@require_torch
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Any = 0.2
lowerCAmelCase_ : List[Any] = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Optional[Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__lowercase , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Dict = 2
lowerCAmelCase_ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Optional[Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__lowercase , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , ) | 262 | 0 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[int] , *A_ : List[str] , **A_ : List[Any]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[Any] , *A_ : str , **A_ : Any):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Union[str, Any] , *A_ : Union[str, Any] , **A_ : Dict):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[int] , *A_ : Tuple , **A_ : Tuple):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : int , *A_ : Optional[int] , **A_ : int):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[Any] , *A_ : List[str] , **A_ : List[str]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[Any] , *A_ : List[Any] , **A_ : str):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : List[str] , *A_ : List[str] , **A_ : Union[str, Any]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : List[str] , *A_ : Optional[int] , **A_ : Any):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : int , *A_ : Dict , **A_ : int):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Tuple , *A_ : List[Any] , **A_ : Optional[Any]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : List[str] , *A_ : int , **A_ : Tuple):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[Any] , *A_ : Optional[int] , **A_ : Union[str, Any]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Union[str, Any] , *A_ : Union[str, Any] , **A_ : List[Any]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[int] , *A_ : Tuple , **A_ : Tuple):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[int] , *A_ : Dict , **A_ : Union[str, Any]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : List[Any] , *A_ : Tuple , **A_ : List[str]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Union[str, Any] , *A_ : Optional[int] , **A_ : Optional[int]):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : Optional[Any] , *A_ : List[Any] , **A_ : str):
requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''sentencepiece''']
def __init__( self : List[Any] , *A_ : List[Any] , **A_ : Union[str, Any]):
requires_backends(self , ['''sentencepiece'''])
# Auto-generated "dummy object" placeholders: each class raises a helpful error
# (via `requires_backends`) when instantiated without the `sentencepiece` backend.
# NOTE(review): obfuscation collapsed every class name to `__snake_case` (only the
# last definition survives at runtime) and gave `__init__` two parameters both
# named `A_`, which is a SyntaxError — the original distinct names are not
# recoverable from this chunk; restore them from the generating script.
class __snake_case ( metaclass=UpperCamelCase_ ):
    # backends this dummy stands in for
    _a = ['''sentencepiece''']
    def __init__( self : str , *A_ : Optional[int] , **A_ : List[Any]):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : List[str] , *A_ : List[Any] , **A_ : Optional[Any]):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : str , *A_ : Optional[int] , **A_ : Dict):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : int , *A_ : int , **A_ : Optional[int]):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : List[Any] , *A_ : List[str] , **A_ : Tuple):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : Union[str, Any] , *A_ : Any , **A_ : Union[str, Any]):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : Dict , *A_ : List[str] , **A_ : List[str]):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : str , *A_ : List[Any] , **A_ : Optional[Any]):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : Tuple , *A_ : Optional[Any] , **A_ : int):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : Optional[Any] , *A_ : Optional[Any] , **A_ : Dict):
        requires_backends(self , ['''sentencepiece'''])
class __snake_case ( metaclass=UpperCamelCase_ ):
    _a = ['''sentencepiece''']
    def __init__( self : Tuple , *A_ : Optional[int] , **A_ : Union[str, Any]):
        requires_backends(self , ['''sentencepiece'''])
| 103 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 262 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Fix: all four module-level constants were collapsed into the single name
# `lowerCAmelCase__`, yet the tokenizer class below references `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`. Restore the expected names.
logger = logging.get_logger(__name__)

# Filenames under which the tokenizer's vocabulary is saved.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum input length (in tokens) for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a dict mapping every utf-8 byte value (0..255) to a printable unicode
    character. Printable ASCII/latin bytes map to themselves; the remaining
    (whitespace/control) bytes are shifted up by 256 so byte-level BPE never has
    to merge across "invisible" characters.

    Fix: the obfuscated body referenced undefined names (`bs`, `cs`, `n`) because
    every local had been renamed to `__lowercase`; it was renamed back to
    `bytes_to_unicode`, which is the name the tokenizer's `__init__` calls.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            # shift non-printable bytes into the 256+ range
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in `word` (a tuple of variable-length
    strings). Used by the BPE loop to find the next lowest-rank merge.

    Fix: locals had been collapsed to `__lowercase`, leaving `pairs` and
    `prev_char` undefined; renamed back to `get_pairs`, the name `bpe()` calls.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowercase_(PreTrainedTokenizer):
    """
    Byte-level BPE tokenizer for LED checkpoints (same algorithm as the
    BART/GPT-2 tokenizer, plus LED-specific padding of `global_attention_mask`).

    Fixes applied to the obfuscated original: every method carried the same name
    (`SCREAMING_SNAKE_CASE`), `__init__` declared duplicate parameter names
    (a SyntaxError), all `self.*` attributes were assigned to throwaway locals,
    and the base class name was undefined. Canonical names were restored; the
    behavior follows the standard LED tokenizer contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned BPE merges to one pre-tokenized token; cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab; unknown -> unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and decode the byte-level mapping back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """Add <s> ... </s> (and pair separators) around the input id sequence(s)."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ):
        """LED (like BART) does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE-encoded like mid-sentence words."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as the base class does, then pad `global_attention_mask` with -1 to match."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 104 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map of ErnieM checkpoint names to their hosted configuration files.
# NOTE(review): the original constant name (likely ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP)
# was obfuscated to `_UpperCAmelCase`; confirm against downstream users before renaming.
_UpperCAmelCase : Dict ={
    """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
    """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class snake_case__(PretrainedConfig):
    """
    Configuration class for ErnieM models.

    Fixes applied: the base class name was undefined (`UpperCAmelCase__`) — the
    file imports `PretrainedConfig`, the expected base — and every constructor
    argument was stored into a throwaway local (`lowerCAmelCase_`) instead of an
    instance attribute, so the config carried no state.
    """

    model_type = "ernie_m"
    # Maps legacy attribute names to the canonical config attributes.
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 2_5_0_0_0_2,
        hidden_size: int = 7_6_8,
        num_hidden_layers: int = 1_2,
        num_attention_heads: int = 1_2,
        intermediate_size: int = 3_0_7_2,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 5_1_4,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DeBERTa sub-package.
# Fixes applied: every assignment had been collapsed onto the single name `a`,
# so `_import_structure` was never built, the optional sub-module lists were
# never attached to it, and the constructed `_LazyModule` was never installed
# in `sys.modules` — restoring the standard transformers lazy-module pattern.
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """
    Helper that holds the parameters used to build a MobileNetV1 image
    processor and fake image inputs for the tests below.

    Fixes applied: the class was renamed back to the name the test case
    instantiates (`MobileNetVaImageProcessingTester`), constructor arguments
    are stored on `self` instead of throwaway locals, and the dict-building
    method got back its expected name `prepare_image_processor_dict`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=1_8,
        min_resolution=3_0,
        max_resolution=4_0_0,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ) -> None:
        size = size if size is not None else {"shortest_edge": 2_0}
        crop_size = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return kwargs for constructing the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class snake_case__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """
    Tests for `MobileNetVaImageProcessor`.

    Fixes applied: the mixin base name was undefined, the tester instance and
    the attribute dict were bound to locals instead of `self`, and every test
    method shared one name (`lowercase_`) so only the last survived — the
    canonical `test_*` names used by unittest discovery were restored.
    """

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 2_0})
        self.assertEqual(image_processor.crop_size, {"height": 1_8, "width": 1_8})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4)
        self.assertEqual(image_processor.size, {"shortest_edge": 4_2})
        self.assertEqual(image_processor.crop_size, {"height": 8_4, "width": 8_4})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : List[str] = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """
    Build a regression model, a DDP-prepared copy and a prepared dataloader.

    Fix: the original had three parameters all named `A_` (a SyntaxError) and
    referenced `accelerator` which was never bound. The call site below uses
    the name `get_basic_setup`, restored here.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """
    Return a DataLoader over the tokenized GLUE/MRPC validation split.

    Fix: restored the name `get_dataloader` (used by `get_mrpc_setup`) and the
    collapsed parameter/local names; `use_longest` selects dynamic vs
    fixed-length padding in the collate function.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=1_28, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """
    Build the MRPC model/dataloader pair in both plain and accelerator-prepared
    ("ddp") form. Restored name/parameters (both were `A_` in the obfuscated code).
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """
    Run `model` over `dataloader`, gathering (logits, targets) across processes
    via `gather_for_metrics`, and return them concatenated. Restored the name
    used by the test helpers and the collapsed local names.
    """
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """
    Check that `gather_for_metrics` yields exactly `num_samples` predictions
    (i.e. duplicated padding samples are dropped). Restored the duplicate `A_`
    parameter names — `main()` calls this as `test_torch_metrics`.
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'
def test_mrpc(dispatch_batches=False, split_batches=False):
    """
    Compare single-process MRPC metrics against metrics computed with
    `gather_for_metrics` under the given batching options; they must match.
    Restored the duplicate `A_` parameters and collapsed locals.
    """
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
    """
    Entry point: exercise `gather_for_metrics` across all combinations of
    `split_batches`/`dispatch_batches`. Restored the name `main` (called by
    the `__main__` guard below) and the collapsed local names.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 5_12)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs) — the launcher looks a function of this name up by
    # convention; the index argument is required by the spawn API but unused.
    main()
if __name__ == "__main__":
main()
| 106 |
from __future__ import annotations
import math
class SegmentTree:
    """
    Segment tree with lazy propagation supporting range-assignment updates and
    range-maximum queries over a 1-indexed array.

    Fixes applied: the class was renamed to `SegmentTree` (the name the demo
    script below instantiates), constructor state is stored on `self` (it was
    assigned to throwaway locals, leaving `self.segment_tree` etc. undefined),
    and the collapsed local names inside `build`/`update`/`query` were restored.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        """Index of the left child of node `idx`."""
        return idx * 2

    def right(self, idx: int) -> int:
        """Index of the right child of node `idx`."""
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list) -> None:
        """Build the tree for a[left_element-1 .. right_element-1] under node `idx`."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every element in [a, b] (1-indexed, inclusive)."""
        # Push any pending lazy value down before descending.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int):
        """Return max over [a, b] (1-indexed, inclusive); -inf outside the range."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q_left = self.query(self.left(idx), left_element, mid, a, b)
        q_right = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q_left, q_right)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    # Demo: three module constants had been collapsed into one name
    # (`_UpperCAmelCase`) while the calls below used `A`, `size` and `segt`;
    # the expected names are restored here.
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """
    Minimal Lightning wrapper: a Longformer encoder plus a 2-way QA output head,
    used only as a container for loading the fine-tuned checkpoint weights.

    Fixes applied: the class was renamed back to `LightningModel` (the name the
    conversion function calls) and the constructor stores its state on `self`
    — the obfuscated version bound everything to a local `a`, so
    `self.model.config` on the Linear line raised AttributeError.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because LightningModule requires it; never called here
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Convert a Lightning QA checkpoint into a `LongformerForQuestionAnswering` save directory.

    Fixes: the obfuscated def used the duplicate parameter name ``A`` three times
    (a SyntaxError) and was named ``__magic_name__`` while the `__main__` block
    calls ``convert_longformer_qa_checkpoint_to_pytorch``.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # Fix: `parser` and `args` were referenced but the assignments had been
    # mangled into `__lowerCAmelCase`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 107 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# Fix: these three constants were all assigned to the same `_UpperCAmelCase` name,
# leaving `TRANSFORMERS_PATH` / `PATH_TO_DOCS` (used below) undefined.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase_ : int = f.readlines()
# Find the start prompt.
lowerCAmelCase_ : List[Any] = 0
while not lines[start_index].startswith(lowerCAmelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase_ : List[str] = start_index
while not lines[end_index].startswith(lowerCAmelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALL_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
# Fix: these were all assigned to `_UpperCAmelCase`, leaving `_re_tf_models`,
# `_re_flax_models`, `_re_pt_models` and `transformers_module` (used below) undefined.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCased `identifier` into words: ``"TFBertModel" -> ["TF", "Bert", "Model"]``.

    Restored name: `get_model_table_from_auto_modules` calls ``camel_case_split``.
    """
    # Split before a lower->upper transition and before the last capital of an
    # acronym followed by a lowercase letter (e.g. "TF|Bert").
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
lowerCAmelCase_ : Tuple = 2 if text == '''✅''' or text == '''❌''' else len(lowerCAmelCase_ )
lowerCAmelCase_ : int = (width - text_length) // 2
lowerCAmelCase_ : Union[str, Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date markdown model table from the content of the auto modules.

    Fixes: restored def name (called by `check_model_table`); `defaultdict` was given a
    non-callable default; `dir()` was called on the wrong object; the
    ``lookup_dict[attr_name] = True`` and ``widths[0] = ...`` assignments had been
    mangled into plain locals.
    """
    # Model name (in the doc) -> config class name.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    # The first column holds the (longest) model name.
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is up to date; rewrite it when `overwrite` is True.

    Raises ValueError when the table is stale and `overwrite` is False.
    Restored def name: the `__main__` block calls ``check_model_table``.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    # Fix: `parser` and `args` were referenced but assigned to `_UpperCAmelCase`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    """Builds tiny UMT5 configs/inputs and runs the shared model checks.

    Fixes: the class is instantiated below as ``UMTaModelTester``; every method was
    named ``lowercase__`` (shadowing each other) while callers use the real names;
    ``__init__`` and ``prepare_inputs_dict`` had duplicate parameter names (a
    SyntaxError); all ``self.*`` assignments had been mangled into locals.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        """Config of the public checkpoint, for slow/integration-style checks."""
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        """Fill in default masks (non-pad attention masks, all-ones head masks)."""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        # NOTE(review): LHS restored; the RHS (config.num_attention_heads) is original.
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fpaa_forward(
        self,
        config,
        input_dict,
    ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shared-mixin test suite for UMT5.

    Fixes: the base classes were the undefined name ``lowercase`` (restored to the
    imported mixins); every class attribute was named ``a`` and every method
    ``lowercase__``, so the mixins and unittest discovery could not see them.
    """

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): flag names restored from the upstream UMT5 test; values preserved.
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        # NOTE(review): method name reconstructed from the upstream file; the test is
        # skipped either way.
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
    """Slow integration checks against the public `google/umt5-small` checkpoint.

    Fixes: the class shadowed the previous test class (same obfuscated name) and the
    method was named ``lowercase__`` so unittest would never collect it.
    """

    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        """Compare tokenization ids and greedy sentinel filling against reference values."""
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333, 61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 108 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point of the `transformers-cli` tool: register all subcommands and dispatch.

    Restored name: the ``__main__`` guard below calls ``main()``.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No subcommand given: show usage and exit with an error code.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    Build a `ksize x ksize` Gabor filter kernel.

    :param ksize: kernel size (forced to the next odd number when even)
    :param sigma: standard deviation of the gaussian envelope
    :param theta: orientation of the stripes, in degrees
    :param lambd: wavelength of the sinusoidal component
    :param gamma: spatial aspect ratio (ellipticity of the support)
    :param psi: phase offset of the sinusoidal component

    Fixes: restored def name (the `__main__` block calls ``gabor_filter_kernel``),
    restored the kernel-fill assignment that had been mangled into a local, and
    replaced the invalid dtype ``np.floataa`` with ``np.float32``.
    """
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    # Fix: `img`, `gray`, `out` and `kernel_aa` were referenced but all assignments
    # had been mangled into a single name `A`.
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    # normalize to 0..255 and convert to 8-bit (fix: `np.uinta` does not exist)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 109 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # Fix: the fallback assigned `_UpperCAmelCase = None`, leaving `NllbTokenizer`
    # (referenced by the tokenizer class) undefined.
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_UpperCAmelCase : Any =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", 
"""mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
# At this point `_UpperCAmelCase` holds the FAIRSEQ language-code list defined just
# above; rebind it to the name the tokenizer actually references.
FAIRSEQ_LANGUAGE_CODES = _UpperCAmelCase


class NllbTokenizerFast(PreTrainedTokenizerFast):
    """Fast NLLB tokenizer (backed by HuggingFace's *tokenizers* library).

    Fixes: the base class was the undefined name ``UpperCAmelCase__`` (restored to
    the imported ``PreTrainedTokenizerFast``) and every class attribute was named
    ``SCREAMING_SNAKE_CASE__``, so the tokenizer machinery could not find them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    # Language-code tokens added around the source/target sequences.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=False , **__lowercase , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : int = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
lowerCAmelCase_ : List[Any] = legacy_behaviour
super().__init__(
vocab_file=__lowercase , tokenizer_file=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , additional_special_tokens=__lowercase , legacy_behaviour=__lowercase , **__lowercase , )
lowerCAmelCase_ : Any = vocab_file
lowerCAmelCase_ : List[Any] = False if not self.vocab_file else True
lowerCAmelCase_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
lowerCAmelCase_ : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(__lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase_ : Any = src_lang if src_lang is not None else '''eng_Latn'''
lowerCAmelCase_ : str = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase_ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowercase_ ( self ) -> str:
return self._src_lang
@src_lang.setter
def lowercase_ ( self , __lowercase ) -> None:
lowerCAmelCase_ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : int = self(__lowercase , add_special_tokens=__lowercase , return_tensors=__lowercase , **__lowercase )
lowerCAmelCase_ : Dict = self.convert_tokens_to_ids(__lowercase )
lowerCAmelCase_ : List[Any] = tgt_lang_id
return inputs
def lowercase_ ( self , __lowercase , __lowercase = "eng_Latn" , __lowercase = None , __lowercase = "fra_Latn" , **__lowercase , ) -> BatchEncoding:
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase )
def lowercase_ ( self ) -> List[Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase_ ( self , __lowercase ) -> None:
lowerCAmelCase_ : List[str] = self.convert_tokens_to_ids(__lowercase )
if self.legacy_behaviour:
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase_ : Optional[int] = [self.cur_lang_code]
lowerCAmelCase_ : List[Any] = [self.eos_token_id]
lowerCAmelCase_ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , __lowercase ) -> None:
lowerCAmelCase_ : Dict = self.convert_tokens_to_ids(__lowercase )
if self.legacy_behaviour:
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : Any = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase_ : Any = [self.cur_lang_code]
lowerCAmelCase_ : Any = [self.eos_token_id]
lowerCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCAmelCase_ : Any = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,) | 262 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = jnp.ones((batch_size, length) ) / length
return scores
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = None
lowerCAmelCase : Tuple = 20
lowerCAmelCase : int = self._get_uniform_logits(batch_size=2 , length=__lowercase )
# tweak scores to not be uniform anymore
lowerCAmelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCAmelCase : str = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCAmelCase : Optional[int] = jax.nn.softmax(__lowercase , axis=-1 )
lowerCAmelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCAmelCase : Any = jax.nn.softmax(temp_dist_warper_sharper(__lowercase , scores.copy() , cur_len=__lowercase ) , axis=-1 )
lowerCAmelCase : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(__lowercase , scores.copy() , cur_len=__lowercase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Dict = 10
lowerCAmelCase : int = 2
# create ramp distribution
lowerCAmelCase : Dict = np.broadcast_to(np.arange(__lowercase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCAmelCase : Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCAmelCase : Dict = FlaxTopKLogitsWarper(3 )
lowerCAmelCase : int = top_k_warp(__lowercase , __lowercase , cur_len=__lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCAmelCase : Optional[Any] = 5
lowerCAmelCase : Union[str, Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCAmelCase : Dict = np.broadcast_to(np.arange(__lowercase )[None, :] , (batch_size, length) ).copy()
lowerCAmelCase : Any = top_k_warp_safety_check(__lowercase , __lowercase , cur_len=__lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = None
lowerCAmelCase : int = 10
lowerCAmelCase : Optional[int] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCAmelCase : Dict = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCAmelCase : int = FlaxTopPLogitsWarper(0.8 )
lowerCAmelCase : int = np.exp(top_p_warp(__lowercase , __lowercase , cur_len=__lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCAmelCase : Union[str, Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCAmelCase : Dict = np.broadcast_to(np.arange(__lowercase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCAmelCase : Any = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCAmelCase : Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCAmelCase : Optional[Any] = top_p_warp(__lowercase , __lowercase , cur_len=__lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = 20
lowerCAmelCase : List[str] = 4
lowerCAmelCase : int = 0
lowerCAmelCase : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowercase )
# check that min length is applied at length 5
lowerCAmelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCAmelCase : Tuple = 5
lowerCAmelCase : Optional[Any] = self._get_uniform_logits(__lowercase , __lowercase )
lowerCAmelCase : Any = min_dist_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
lowerCAmelCase : Optional[Any] = self._get_uniform_logits(__lowercase , __lowercase )
lowerCAmelCase : List[str] = 15
lowerCAmelCase : Optional[int] = min_dist_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertFalse(jnp.isinf(__lowercase ).any() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = 20
lowerCAmelCase : Optional[Any] = 4
lowerCAmelCase : Any = 0
lowerCAmelCase : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowercase )
# check that all scores are -inf except the bos_token_id score
lowerCAmelCase : Dict = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCAmelCase : str = 1
lowerCAmelCase : Optional[int] = self._get_uniform_logits(__lowercase , __lowercase )
lowerCAmelCase : str = logits_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCAmelCase : List[Any] = 3
lowerCAmelCase : Union[str, Any] = self._get_uniform_logits(__lowercase , __lowercase )
lowerCAmelCase : str = logits_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertFalse(jnp.isinf(__lowercase ).any() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = 20
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Any = 5
lowerCAmelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowercase , eos_token_id=__lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCAmelCase : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCAmelCase : Any = 4
lowerCAmelCase : List[str] = self._get_uniform_logits(__lowercase , __lowercase )
lowerCAmelCase : str = logits_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCAmelCase : List[Any] = 3
lowerCAmelCase : Optional[int] = self._get_uniform_logits(__lowercase , __lowercase )
lowerCAmelCase : List[Any] = logits_processor(__lowercase , __lowercase , cur_len=__lowercase )
self.assertFalse(jnp.isinf(__lowercase ).any() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[int] = 10
lowerCAmelCase : Dict = 15
lowerCAmelCase : Any = 2
lowerCAmelCase : str = 1
lowerCAmelCase : Optional[int] = 15
# dummy input_ids and scores
lowerCAmelCase : Any = ids_tensor((batch_size, sequence_length) , __lowercase )
lowerCAmelCase : Dict = input_ids.copy()
lowerCAmelCase : Optional[int] = self._get_uniform_logits(__lowercase , __lowercase )
lowerCAmelCase : Any = scores.copy()
# instantiate all dist processors
lowerCAmelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
lowerCAmelCase : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowercase )
lowerCAmelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowercase )
lowerCAmelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowercase , eos_token_id=__lowercase )
lowerCAmelCase : int = 10
# no processor list
lowerCAmelCase : str = temp_dist_warp(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : List[str] = top_k_warp(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : Tuple = top_p_warp(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : Tuple = min_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : List[str] = bos_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : str = eos_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
# with processor list
lowerCAmelCase : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase : Optional[Any] = processor(__lowercase , __lowercase , cur_len=__lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : int = 10
lowerCAmelCase : int = 15
lowerCAmelCase : List[Any] = 2
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : int = 15
# dummy input_ids and scores
lowerCAmelCase : Any = ids_tensor((batch_size, sequence_length) , __lowercase )
lowerCAmelCase : Dict = input_ids.copy()
lowerCAmelCase : Tuple = self._get_uniform_logits(__lowercase , __lowercase )
lowerCAmelCase : int = scores.copy()
# instantiate all dist processors
lowerCAmelCase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase : Any = FlaxTopKLogitsWarper(3 )
lowerCAmelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowercase )
lowerCAmelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowercase )
lowerCAmelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowercase , eos_token_id=__lowercase )
lowerCAmelCase : Dict = 10
# no processor list
def run_no_processor_list(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = temp_dist_warp(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : List[Any] = top_k_warp(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : int = top_p_warp(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : int = min_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : Optional[Any] = bos_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
lowerCAmelCase : int = eos_dist_proc(__lowercase , __lowercase , cur_len=__lowercase )
return scores
# with processor list
def run_processor_list(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase : Tuple = processor(__lowercase , __lowercase , cur_len=__lowercase )
return scores
lowerCAmelCase : Any = jax.jit(__lowercase )
lowerCAmelCase : str = jax.jit(__lowercase )
lowerCAmelCase : Optional[int] = jitted_run_no_processor_list(__lowercase , __lowercase , __lowercase )
lowerCAmelCase : str = jitted_run_processor_list(__lowercase , __lowercase , __lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 108 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_UpperCAmelCase : Optional[Any] =NewType("""DataClass""", Any)
_UpperCAmelCase : Dict =NewType("""DataClassType""", Any)
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def lowerCAmelCase ( lowerCAmelCase_ )-> Callable[[str], Any]:
lowerCAmelCase_ : str = {str(lowerCAmelCase_ ): choice for choice in choices}
return lambda lowerCAmelCase_ : str_to_choice.get(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( *,
lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = dataclasses.MISSING , lowerCAmelCase_ = dataclasses.MISSING , lowerCAmelCase_ = None , **lowerCAmelCase_ , )-> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
lowerCAmelCase_ : Dict = {}
if aliases is not None:
lowerCAmelCase_ : str = aliases
if help is not None:
lowerCAmelCase_ : Tuple = help
return dataclasses.field(metadata=lowerCAmelCase_ , default=lowerCAmelCase_ , default_factory=lowerCAmelCase_ , **lowerCAmelCase_ )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Iterable[DataClassType]
def __init__( self , __lowercase , **__lowercase ) -> List[str]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCAmelCase_ : Optional[int] = ArgumentDefaultsHelpFormatter
super().__init__(**__lowercase )
if dataclasses.is_dataclass(__lowercase ):
lowerCAmelCase_ : Union[str, Any] = [dataclass_types]
lowerCAmelCase_ : List[Any] = list(__lowercase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__lowercase )
@staticmethod
def lowercase_ ( __lowercase , __lowercase ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = f"""--{field.name}"""
lowerCAmelCase_ : Tuple = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __lowercase ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
lowerCAmelCase_ : List[str] = kwargs.pop('''aliases''' , [] )
if isinstance(__lowercase , __lowercase ):
lowerCAmelCase_ : Optional[Any] = [aliases]
lowerCAmelCase_ : Any = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__lowercase , '''UnionType''' ) and isinstance(__lowercase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__lowercase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__lowercase ) not in field.type.__args__:
# filter `str` in Union
lowerCAmelCase_ : List[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCAmelCase_ : Dict = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCAmelCase_ : str = (
field.type.__args__[0] if isinstance(__lowercase , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCAmelCase_ : List[Any] = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCAmelCase_ : Dict = {}
if origin_type is Literal or (isinstance(field.type , __lowercase ) and issubclass(field.type , __lowercase )):
if origin_type is Literal:
lowerCAmelCase_ : Optional[Any] = field.type.__args__
else:
lowerCAmelCase_ : int = [x.value for x in field.type]
lowerCAmelCase_ : str = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : str = field.default
else:
lowerCAmelCase_ : Tuple = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCAmelCase_ : Tuple = copy(__lowercase )
# Hack because type=bool in argparse does not behave as we want.
lowerCAmelCase_ : Dict = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCAmelCase_ : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCAmelCase_ : List[str] = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCAmelCase_ : int = '''?'''
# This is the value that will get picked if we do --field_name (without value)
lowerCAmelCase_ : List[Any] = True
elif isclass(__lowercase ) and issubclass(__lowercase , __lowercase ):
lowerCAmelCase_ : Union[str, Any] = field.type.__args__[0]
lowerCAmelCase_ : Dict = '''+'''
if field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCAmelCase_ : Optional[int] = True
else:
lowerCAmelCase_ : List[Any] = field.type
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : Dict = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : List[Any] = field.default_factory()
else:
lowerCAmelCase_ : int = True
parser.add_argument(__lowercase , *__lowercase , **__lowercase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCAmelCase_ : Any = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__lowercase )
def lowercase_ ( self , __lowercase ) -> List[Any]:
if hasattr(__lowercase , '''_argument_group_name''' ):
lowerCAmelCase_ : str = self.add_argument_group(dtype._argument_group_name )
else:
lowerCAmelCase_ : Dict = self
try:
lowerCAmelCase_ : Dict[str, type] = get_type_hints(__lowercase )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(__lowercase ):
lowerCAmelCase_ : Any = '''.'''.join(map(__lowercase , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__lowercase ):
if not field.init:
continue
lowerCAmelCase_ : Optional[int] = type_hints[field.name]
self._parse_dataclass_field(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=None , __lowercase=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCAmelCase_ : str = []
if args_filename:
args_files.append(Path(__lowercase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCAmelCase_ : str = ArgumentParser()
args_file_parser.add_argument(__lowercase , type=__lowercase , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = args_file_parser.parse_known_args(args=__lowercase )
lowerCAmelCase_ : int = vars(__lowercase ).get(args_file_flag.lstrip('''-''' ) , __lowercase )
if cmd_args_file_paths:
args_files.extend([Path(__lowercase ) for p in cmd_args_file_paths] )
lowerCAmelCase_ : Dict = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCAmelCase_ : Any = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.parse_known_args(args=__lowercase )
lowerCAmelCase_ : Any = []
for dtype in self.dataclass_types:
lowerCAmelCase_ : str = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
lowerCAmelCase_ : str = {k: v for k, v in vars(__lowercase ).items() if k in keys}
for k in keys:
delattr(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[int] = dtype(**__lowercase )
outputs.append(__lowercase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__lowercase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
lowerCAmelCase_ : int = set(args.keys() )
lowerCAmelCase_ : str = []
for dtype in self.dataclass_types:
lowerCAmelCase_ : int = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
lowerCAmelCase_ : List[str] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCAmelCase_ : List[str] = dtype(**__lowercase )
outputs.append(__lowercase )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__lowercase )}""" )
return tuple(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
with open(Path(__lowercase ) , encoding='''utf-8''' ) as open_json_file:
lowerCAmelCase_ : Dict = json.loads(open_json_file.read() )
lowerCAmelCase_ : str = self.parse_dict(__lowercase , allow_extra_keys=__lowercase )
return tuple(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
lowerCAmelCase_ : Optional[Any] = self.parse_dict(yaml.safe_load(Path(__lowercase ).read_text() ) , allow_extra_keys=__lowercase )
return tuple(__lowercase ) | 262 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
# Optional backends: textwrap/PIL are only needed by the text-rendering
# helpers below; torch is only needed for patch extraction.
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # NOTE(review): mangled name — presumably this was the
    # `is_torch_greater_or_equal_than_1_11 = False` fallback; confirm.
    a_ = False

# NOTE(review): the assignments below all reuse the name `a_`, so each one
# shadows the previous (fallback flag, logger, font-repo id). Only the last
# value — the Hugging Face Hub repo id for the fallback font — survives and is
# what later code reads as `a_`. Confirm the intended distinct names.
a_ = logging.get_logger(__name__)
a_ = """ybelkada/fonts"""
def a__ ( ):
    """Raise ``ImportError`` when torch is installed but older than 1.11.0.

    Pix2StructImageProcessor relies on torch APIs introduced in 1.11, so this
    gate is called before any torch-dependent helper runs. When torch is not
    installed at all, the check is a no-op (the missing backend is reported
    elsewhere).
    """
    # Nothing to validate without a torch installation.
    if not is_torch_available():
        return
    # Version flag is computed once at import time in transformers.pytorch_utils.
    if is_torch_greater_or_equal_than_1_11:
        return
    raise ImportError(
        f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
        "Pix2StructImageProcessor. Please upgrade torch."
    )
def a__ ( image_tensor ,patch_height ,patch_width ):
    """
    Split an image tensor into non-overlapping, flattened patches.

    Fixes the mangled original, whose three parameters all shared the name
    ``_UpperCamelCase`` (a ``SyntaxError``) and whose body read the undefined
    name ``lowerCAmelCase_``. The positional call interface is unchanged.

    Args:
        image_tensor: a 3D tensor of shape ``(channels, height, width)``;
            height/width are assumed divisible by the patch size — TODO confirm.
        patch_height: height of each extracted patch.
        patch_width: width of each extracted patch.

    Returns:
        A tensor of shape ``(1, height // patch_height, width // patch_width,
        channels * patch_height * patch_width)`` — one flattened vector per patch.
    """
    requires_backends(a__ ,["torch"] )
    # NOTE(review): `_check_torch_version` appears to be the pre-mangling name
    # of the version-gate helper defined above — confirm the call target.
    _check_torch_version()
    # `unfold` expects a batched (N, C, H, W) input, so add a batch axis first.
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(
        image_tensor ,(patch_height, patch_width) ,stride=(patch_height, patch_width) )
    # unfold yields (N, C * ph * pw, num_patches); recover the per-patch layout.
    patches = patches.reshape(
        image_tensor.size(0 ) ,image_tensor.size(1 ) ,patch_height ,patch_width ,-1 )
    # Reorder to (rows, cols, C * ph * pw): a grid of flattened patch vectors.
    patches = patches.permute(0 ,4 ,2 ,3 ,1 ).reshape(
        image_tensor.size(2 ) // patch_height ,
        image_tensor.size(3 ) // patch_width ,
        image_tensor.size(1 ) * patch_height * patch_width ,)
    return patches.unsqueeze(0 )
def a__ ( text ,text_size = 36 ,text_color = "black" ,background_color = "white" ,left_padding = 5 ,right_padding = 5 ,top_padding = 5 ,bottom_padding = 5 ,font_bytes = None ,font_path = None ,):
    """Render *text* (wrapped at 80 columns) onto a new PIL image.

    Font resolution order: ``font_bytes`` if given, else ``font_path``, else the
    Arial.TTF shipped in the module's fonts Hub repo. Padding values are pixels
    added around the rendered text.

    Returns:
        A PIL.Image.Image containing the rendered text.
    """
    # BUG FIX: the original declared every parameter as ``_UpperCamelCase``
    # (a SyntaxError) and read never-bound locals; proper bindings are restored.
    requires_backends(a__ ,'''vision''' )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = '''\n'''.join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        # ``a_`` is the module-level fonts repo id ("ybelkada/fonts").
        font = hf_hub_download(a_ ,'''Arial.TTF''' )
    font = ImageFont.truetype(font ,encoding='''UTF-8''' ,size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('''RGB''' ,(1, 1) ,background_color ) )
    # textbbox returns (left, top, right, bottom); the original collapsed this
    # 4-tuple into a single unused name.
    _, _, text_width, text_height = temp_draw.textbbox((0, 0) ,wrapped_text ,font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('''RGB''' ,(image_width, image_height) ,background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) ,text=wrapped_text ,fill=text_color ,font=font )
    return image
def a__ ( image ,header ,**kwargs ):
    """Render *header* as a text banner and stack it above *image*.

    Both parts are resized to a common width; extra ``kwargs`` are forwarded to
    the text renderer (font, colors, padding).

    Returns:
        A numpy array in the input's channel layout containing the combined image.
    """
    # BUG FIX: the original declared both parameters as ``_UpperCamelCase``
    # (a SyntaxError) and read never-bound locals; proper bindings are restored.
    requires_backends(a__ ,'''vision''' )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    # NOTE(review): ``render_text`` must resolve to the text renderer defined
    # above, whose module-level name this file's obfuscation changed — confirm.
    header_image = render_text(header ,**kwargs )
    new_width = max(header_image.width ,image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new('''RGB''' ,(new_width, new_height + new_header_height) ,'''white''' )
    new_image.paste(header_image.resize((new_width, new_header_height) ) ,(0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) ,(0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image ,ChannelDimension.LAST )
    return new_image
class __lowerCAmelCase ( UpperCAmelCase__ ):
    """Pix2Struct-style image processor: flattens images into padded patch sequences.

    NOTE(review): this file was machine-renamed. Method signatures repeat
    ``__UpperCAmelCase`` (a SyntaxError) and bodies assign to ``__lowerCamelCase``
    while reading the original variable names (``patch_size``, ``patches`` ...),
    which are never bound here — confirm against the upstream source before use.
    """

    # The only model-input key this processor produces.
    lowerCAmelCase__ = ["""flattened_patches"""]

    def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 2048 , __UpperCAmelCase = False , **__UpperCAmelCase , ):
        """Store preprocessing defaults: normalize flag, RGB conversion flag,
        patch grid size (default 16x16), patch budget (default 2048), VQA mode."""
        super().__init__(**__lowercase )
        # Default to 16x16 patches when no explicit size is supplied.
        __lowerCamelCase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
        __lowerCamelCase = do_normalize
        __lowerCamelCase = do_convert_rgb
        __lowerCamelCase = max_patches
        __lowerCamelCase = is_vqa

    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
        """Resize the image so an integral patch grid fits within ``max_patches``,
        then flatten it to (max_patches, 2 + patch_h * patch_w * channels), with
        1-based row/column ids prepended to each patch and zero rows as padding."""
        requires_backends(self.extract_flattened_patches , '''torch''' )
        _check_torch_version()
        # convert to torch
        __lowerCamelCase = to_channel_dimension_format(__lowercase , ChannelDimension.FIRST )
        __lowerCamelCase = torch.from_numpy(__lowercase )
        __lowerCamelCase = patch_size['''height'''], patch_size['''width''']
        __lowerCamelCase = get_image_size(__lowercase )
        # maximize scale s.t.
        # scale chosen so rows * cols is as close to max_patches as possible.
        __lowerCamelCase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        __lowerCamelCase = max(min(math.floor(scale * image_height / patch_height ) , __lowercase ) , 1 )
        __lowerCamelCase = max(min(math.floor(scale * image_width / patch_width ) , __lowercase ) , 1 )
        __lowerCamelCase = max(num_feasible_rows * patch_height , 1 )
        __lowerCamelCase = max(num_feasible_cols * patch_width , 1 )
        # Resize so the feasible patch grid tiles the image exactly.
        __lowerCamelCase = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=__lowercase , antialias=__lowercase , ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        __lowerCamelCase = torch_extract_patches(__lowercase , __lowercase , __lowercase )
        __lowerCamelCase = patches.shape
        __lowerCamelCase = patches_shape[1]
        __lowerCamelCase = patches_shape[2]
        __lowerCamelCase = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        __lowerCamelCase = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        __lowerCamelCase = torch.arange(__lowercase ).reshape([rows, 1] ).repeat(1 , __lowercase ).reshape([rows * columns, 1] )
        __lowerCamelCase = torch.arange(__lowercase ).reshape([1, columns] ).repeat(__lowercase , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        __lowerCamelCase = row_ids.to(torch.floataa )
        __lowerCamelCase = col_ids.to(torch.floataa )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        __lowerCamelCase = torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        __lowerCamelCase = torch.nn.functional.pad(__lowercase , [0, 0, 0, max_patches - (rows * columns)] ).float()
        __lowerCamelCase = to_numpy_array(__lowercase )
        return result

    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
        """Per-image standardization: subtract the image's own mean and divide by
        its std, with the std floored at 1/sqrt(number of pixels)."""
        if image.dtype == np.uinta:
            __lowerCamelCase = image.astype(np.floataa )
        # take mean across the whole `image`
        __lowerCamelCase = np.mean(__lowercase )
        __lowerCamelCase = np.std(__lowercase )
        # Floor the std so near-constant images do not blow up.
        __lowerCamelCase = max(__lowercase , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(__lowercase , mean=__lowercase , std=__lowercase , **__lowercase )

    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
        """Full preprocessing pipeline: optional RGB conversion, optional VQA
        header rendering, optional normalization, patch extraction, and an
        attention mask marking non-padding patch rows. Returns a BatchFeature."""
        __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        __lowerCamelCase = patch_size if patch_size is not None else self.patch_size
        __lowerCamelCase = max_patches if max_patches is not None else self.max_patches
        __lowerCamelCase = self.is_vqa
        # The output is always flattened patches; a channel layout makes no sense.
        if kwargs.get('''data_format''' , __lowercase ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are ''' )
        __lowerCamelCase = make_list_of_images(__lowercase )
        if not valid_images(__lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            __lowerCamelCase = [convert_to_rgb(__lowercase ) for image in images]
        # All transformations expect numpy arrays.
        __lowerCamelCase = [to_numpy_array(__lowercase ) for image in images]
        if is_vqa:
            # VQA checkpoints expect the question rendered into the image itself.
            if header_text is None:
                raise ValueError('''A header text must be provided for VQA models.''' )
            __lowerCamelCase = kwargs.pop('''font_bytes''' , __lowercase )
            __lowerCamelCase = kwargs.pop('''font_path''' , __lowercase )
            if isinstance(__lowercase , __lowercase ):
                __lowerCamelCase = [header_text] * len(__lowercase )
            __lowerCamelCase = [
                render_header(__lowercase , header_text[i] , font_bytes=__lowercase , font_path=__lowercase )
                for i, image in enumerate(__lowercase )
            ]
        if do_normalize:
            __lowerCamelCase = [self.normalize(image=__lowercase ) for image in images]
        # convert to torch tensor and permute
        __lowerCamelCase = [
            self.extract_flattened_patches(image=__lowercase , max_patches=__lowercase , patch_size=__lowercase )
            for image in images
        ]
        # create attention mask in numpy
        # A patch row of all zeros is padding -> mask 0; anything else -> 1.
        __lowerCamelCase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
        __lowerCamelCase = BatchFeature(
            data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=__lowercase )
        return encoded_outputs
| 330 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase ( lowerCAmelCase_ )-> Union[str, Any]:
    """Factory registered with the CLI parser: builds the env command object.

    The parsed-args argument is accepted for the factory protocol but unused.
    """
    # BUG FIX: the original returned ``EnvironmentCommand()``, a name that does
    # not exist in this file — the class was renamed to ``snake_case__``.
    return snake_case__()
class snake_case__( UpperCAmelCase__ ):
    """`diffusers-cli env` command: prints version/platform info for bug reports.

    NOTE(review): register_subcommand below binds the subparser to one throwaway
    name but calls ``download_parser`` — a rename artifact; confirm upstream.
    """

    @staticmethod
    def lowercase_ ( __lowercase ) -> List[Any]:
        # Register the `env` subcommand and route it to this class's factory.
        lowerCAmelCase_ : List[str] = parser.add_parser('''env''' )
        download_parser.set_defaults(func=__lowercase )

    def lowercase_ ( self ) -> int:
        """Collect versions of optional dependencies (each 'not installed' unless
        importable), print a fill-in template for GitHub issues, return the dict."""
        lowerCAmelCase_ : Optional[Any] = huggingface_hub.__version__
        lowerCAmelCase_ : str = '''not installed'''
        lowerCAmelCase_ : str = '''NA'''
        if is_torch_available():
            import torch

            lowerCAmelCase_ : Any = torch.__version__
            lowerCAmelCase_ : str = torch.cuda.is_available()
        lowerCAmelCase_ : List[str] = '''not installed'''
        if is_transformers_available():
            import transformers

            lowerCAmelCase_ : Any = transformers.__version__
        lowerCAmelCase_ : Optional[Any] = '''not installed'''
        if is_accelerate_available():
            import accelerate

            lowerCAmelCase_ : List[Any] = accelerate.__version__
        lowerCAmelCase_ : List[str] = '''not installed'''
        if is_xformers_available():
            import xformers

            lowerCAmelCase_ : Optional[Any] = xformers.__version__
        # Assemble the report; the last two entries are for the user to fill in.
        lowerCAmelCase_ : int = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(__lowercase ) )
        return info

    @staticmethod
    def lowercase_ ( __lowercase ) -> str:
        # Format the info dict as a markdown bullet list.
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)

# Map from checkpoint name to the URL of its hosted config file.
lowerCAmelCase_ = {
    """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __A ( UpperCAmelCase__ ):
    '''Configuration for UMT5 models (T5-style encoder/decoder hyper-parameters).

    NOTE(review): the mangled ``__init__`` signature repeats ``_snake_case`` for
    every parameter (a SyntaxError) while the body reads the original keyword
    names (``vocab_size`` ...); the original defaults are still visible
    positionally below — confirm against the upstream source.
    '''

    # Model identifier and the keys ignored at inference time.
    lowerCAmelCase : str = """umt5"""
    lowerCAmelCase : str = ["""past_key_values"""]

    def __init__( self : List[str] ,_snake_case : Optional[Any]=250_112 ,_snake_case : Union[str, Any]=512 ,_snake_case : Dict=64 ,_snake_case : List[Any]=1_024 ,_snake_case : Dict=8 ,_snake_case : Tuple=None ,_snake_case : Any=6 ,_snake_case : Tuple=32 ,_snake_case : Any=128 ,_snake_case : Any=0.1 ,_snake_case : Union[str, Any]=1e-6 ,_snake_case : Tuple=1.0 ,_snake_case : List[Any]="gated-gelu" ,_snake_case : Optional[Any]=True ,_snake_case : int=True ,_snake_case : Optional[int]="T5Tokenizer" ,_snake_case : Any=True ,_snake_case : Any=0 ,_snake_case : Optional[int]=1 ,_snake_case : Union[str, Any]=0 ,**_snake_case : Tuple ,) -> str:
        """Store all architecture hyper-parameters and validate the activation spec."""
        super().__init__(
            is_encoder_decoder=__lowercase ,tokenizer_class=__lowercase ,tie_word_embeddings=__lowercase ,pad_token_id=__lowercase ,eos_token_id=__lowercase ,decoder_start_token_id=__lowercase ,**__lowercase ,)
        lowercase__ : Any = vocab_size
        lowercase__ : Optional[Any] = d_model
        lowercase__ : Tuple = d_kv
        lowercase__ : List[str] = d_ff
        lowercase__ : Union[str, Any] = num_layers
        lowercase__ : Dict = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        lowercase__ : Dict = num_heads
        lowercase__ : int = relative_attention_num_buckets
        lowercase__ : str = relative_attention_max_distance
        lowercase__ : int = dropout_rate
        lowercase__ : Optional[Any] = layer_norm_epsilon
        lowercase__ : Tuple = initializer_factor
        lowercase__ : List[str] = feed_forward_proj
        lowercase__ : str = use_cache
        # The activation spec is "ACT_FN" or "gated-ACT_FN"; split and validate it.
        lowercase__ : List[str] = self.feed_forward_proj.split('''-''' )
        lowercase__ : int = act_info[-1]
        lowercase__ : List[Any] = act_info[0] == '''gated'''
        if len(__lowercase ) > 1 and act_info[0] != "gated" or len(__lowercase ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        # Historical alias: "gated-gelu" maps to the gelu_new kernel.
        if feed_forward_proj == "gated-gelu":
            lowercase__ : int = '''gelu_new'''

    @property
    def UpperCAmelCase ( self : List[str] ) -> List[str]:
        """Alias for the model's hidden size."""
        return self.d_model

    @property
    def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
        """Alias for the number of attention heads."""
        return self.num_heads

    @property
    def UpperCAmelCase ( self : str ) -> Optional[int]:
        """Alias for the number of hidden layers."""
        return self.num_layers
class __A ( UpperCAmelCase__ ):
    '''ONNX export configuration for UMT5 (seq2seq-with-past input layout).

    NOTE(review): the ``inputs`` body assigns to throwaway names but reads
    ``common_inputs`` — a rename artifact; confirm against the T5 original it
    was copied from.
    '''

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the ONNX graph inputs, with past-key-value
        entries appended when exporting with cache."""
        lowercase__ : List[str] = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            # With cache, the decoder consumes one new token plus the past length.
            lowercase__ : List[str] = '''past_encoder_sequence + sequence'''
            lowercase__ : int = {0: '''batch'''}
            lowercase__ : List[str] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            lowercase__ : str = {0: '''batch''', 1: '''decoder_sequence'''}
            lowercase__ : int = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(__lowercase ,direction='''inputs''' )
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def UpperCAmelCase ( self : List[Any] ) -> int:
        """Minimum ONNX opset required for export."""
        return 13

    @property
    def UpperCAmelCase ( self : List[str] ) -> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 5e-4
| 16 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case__( unittest.TestCase ):
    '''Integration tests pinning JukeboxTokenizer output ids against frozen fixtures.

    The two tests tokenize the same artist/genre/lyrics metadata with the 1b- and
    5b-lyrics checkpoints and compare against hard-coded expected tensors.
    '''

    SCREAMING_SNAKE_CASE__ : Tuple = JukeboxTokenizer
    # Shared metadata fed to the tokenizer in both tests.
    SCREAMING_SNAKE_CASE__ : int = {
        """artist""": """Zac Brown Band""",
        """genres""": """Country""",
        """lyrics""": """I met a traveller from an antique land,
    Who said \"Two vast and trunkless legs of stone
    Stand in the desert. . . . Near them, on the sand,
    Half sunk a shattered visage lies, whose frown,
    And wrinkled lip, and sneer of cold command,
    Tell that its sculptor well those passions read
    Which yet survive, stamped on these lifeless things,
    The hand that mocked them, and the heart that fed;
    And on the pedestal, these words appear:
    My name is Ozymandias, King of Kings;
    Look on my Works, ye Mighty, and despair!
    Nothing beside remains. Round the decay
    Of that colossal Wreck, boundless and bare
    The lone and level sands stretch far away
    """,
    }

    @require_torch
    def lowercase_ ( self ) -> Union[str, Any]:
        """1b-lyrics checkpoint: tokenized ids must match the frozen fixture."""
        import torch

        lowerCAmelCase_ : Union[str, Any] = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        lowerCAmelCase_ : Any = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        lowerCAmelCase_ : List[str] = [
            torch.tensor([[
                0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
                7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
                4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
                4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
                3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
                4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
                4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
                7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
                7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
                6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
                3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
                2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
                3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
                2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
                4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
                4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
                7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
                3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
                4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
                4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
                3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
                4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
                3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
                3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
                4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
                3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
                7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
                4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
                4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
                4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
                4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
                4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
                4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
                2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
                7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
                4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
                7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
                3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
                4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
                4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
                4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
                3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
                4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
                2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
                7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
                7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
                7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                7_6, 7_6]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )

    @require_torch
    def lowercase_ ( self ) -> List[Any]:
        """5b-lyrics checkpoint: tokenized ids must match the frozen fixture."""
        import torch

        lowerCAmelCase_ : Any = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        lowerCAmelCase_ : str = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        lowerCAmelCase_ : Tuple = [
            torch.tensor([[
                0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
                3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
                3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
                4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
                7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
                7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
                2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
                3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
                3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
                7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
                7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
                4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
                7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
                7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
                7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
                6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
                4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
                4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
                3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
                3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
                4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
                4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
                4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
                3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
                3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
                4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
                3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
                3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
                4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
                2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
                3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
                3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
                3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
                3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
                4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
                3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
                1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
                1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
                4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
                4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
                4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
                4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
                2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
                3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
                7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
                3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
                4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
                7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
                4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
                7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
                2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
                7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __UpperCamelCase :
    """Helper that builds tiny TF TransfoXL configs/inputs and checks model shapes.

    NOTE(review): assignments bind to the throwaway name ``a`` while later code
    reads the original attribute/variable names — a rename artifact; confirm
    against the upstream tester before executing.
    """

    def __init__( self , lowerCAmelCase__ , ) -> int:
        """Record the parent test case and the miniature model hyper-parameters."""
        a : str = parent
        a : List[str] = 13
        a : Any = 7
        a : List[Any] = 30
        a : str = self.seq_length + self.mem_len
        a : str = 15
        a : List[str] = True
        a : int = True
        a : List[Any] = 99
        a : Optional[int] = [10, 50, 80]
        a : str = 32
        a : Union[str, Any] = 32
        a : Any = 4
        a : int = 8
        a : Union[str, Any] = 128
        a : Dict = 2
        a : List[Any] = 2
        a : Union[str, Any] = None
        a : List[str] = 1
        a : Optional[int] = 0
        a : Dict = 3
        a : Any = self.vocab_size - 1
        a : int = 0.01

    def __a ( self ) -> List[Any]:
        """Create random input id tensors, optional labels, and a tiny config."""
        a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a : int = None
        if self.use_labels:
            a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a : str = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_a, lm_labels)

    def __a ( self ) -> List[str]:
        """Seed python and TF RNGs for reproducible checks."""
        random.seed(self.seed )
        tf.random.set_seed(self.seed )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
        """Base model: hidden states and memory tensors must have expected shapes."""
        a : int = TFTransfoXLModel(__lowercase )
        a : Optional[Any] = model(__lowercase ).to_tuple()
        a : str = {'''input_ids''': input_ids_a, '''mems''': mems_a}
        a : Union[str, Any] = model(__lowercase ).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
        """LM head: logits and memories keep expected shapes across call styles."""
        a : Optional[Any] = TFTransfoXLLMHeadModel(__lowercase )
        a : List[Any] = model(__lowercase ).to_tuple()
        a : int = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
        a : Any = model(__lowercase ).to_tuple()
        a : Any = model([input_ids_a, mems_a] ).to_tuple()
        a : List[str] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
        a : Optional[int] = model(__lowercase ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
        """Sequence classification head: logits shaped (batch, num_labels)."""
        a : Dict = TFTransfoXLForSequenceClassification(__lowercase )
        a : str = model(__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __a ( self ) -> str:
        """Adapt prepare_config_and_inputs() to the common-test dict format."""
        a : Dict = self.prepare_config_and_inputs()
        (a) : Optional[int] = config_and_inputs
        a : int = {'''input_ids''': input_ids_a}
        return config, inputs_dict
@require_tf
class __UpperCamelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common-test suite for the TF TransfoXL model family (skipped without TF)."""

    # Model classes exercised by the shared ModelTester machinery.
    lowerCamelCase : Optional[int] =(
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    lowerCamelCase : int =() if is_tf_available() else ()
    # Pipeline-task to model-class mapping for pipeline smoke tests.
    lowerCamelCase : str =(
        {
            """feature-extraction""": TFTransfoXLModel,
            """text-classification""": TFTransfoXLForSequenceClassification,
            """text-generation""": TFTransfoXLLMHeadModel,
            """zero-shot""": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    lowerCamelCase : str =False
    lowerCamelCase : str =False
    lowerCamelCase : int =False
    lowerCamelCase : int =False

    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
        """Return True for pipeline test cases that are known to be incompatible."""
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def __a ( self ) -> Union[str, Any]:
        """Build the model tester and the config tester used by all tests below."""
        a : List[Any] = TFTransfoXLModelTester(self )
        a : Optional[Any] = ConfigTester(self , config_class=__lowercase , d_embed=37 )

    def __a ( self ) -> Optional[int]:
        self.config_tester.run_common_tests()

    def __a ( self ) -> Optional[Any]:
        # Base-model shape checks with a fixed seed.
        self.model_tester.set_seed()
        a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*__lowercase )

    def __a ( self ) -> Tuple:
        # LM-head shape checks with a fixed seed.
        self.model_tester.set_seed()
        a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*__lowercase )

    def __a ( self ) -> Tuple:
        a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__lowercase )

    def __a ( self ) -> List[str]:
        """Input embeddings exist for all classes; output embeddings only where expected."""
        a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        a : str = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            a : int = model_class(__lowercase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                a : List[Any] = model.get_output_embeddings()
                assert isinstance(__lowercase , tf.keras.layers.Layer )
                a : List[str] = model.get_bias()
                assert name is None
            else:
                a : str = model.get_output_embeddings()
                assert x is None
                a : int = model.get_bias()
                assert name is None

    def __a ( self ) -> Optional[Any]:
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def __a ( self ) -> Any:
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : int = TFTransfoXLModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    @unittest.skip(reason="This model doesn\'t play well with fit() due to not returning a single loss." )
    def __a ( self ) -> Dict:
        pass
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    """Greedy-generation regression test for the pretrained TransfoXL LM head."""

    @unittest.skip("Skip test until #12651 is resolved." )
    @slow
    def __a ( self ) -> List[Any]:
        """Generate from a fixed prompt and compare against the recorded greedy ids."""
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
        # fmt: off
        # NOTE(review): the original dtype read `tf.intaa` (a mangled
        # `tf.int32`); token ids are 32-bit integers.
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        # The expected ids above are a fixed continuation, so sampling must be
        # off (the original passed an undefined placeholder for `do_sample`).
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
| 105 |
from __future__ import annotations
import requests
def lowerCAmelCase ( lowerCAmelCase_ )-> dict:
    """Fetch a single Hacker News item (story, comment, ...) by its numeric id.

    Returns the decoded JSON payload from the official Firebase API.
    """
    # The original f-string interpolated an undefined name (`story_id`);
    # interpolate the id that was actually passed in.
    url = f"""https://hacker-news.firebaseio.com/v0/item/{lowerCAmelCase_}.json?print=pretty"""
    return requests.get(url ).json()
def lowerCAmelCase ( lowerCAmelCase_ = 10 )-> list[dict]:
    """Return the metadata dicts of the current top Hacker News stories.

    :param lowerCAmelCase_: maximum number of stories to fetch (default 10).
    """
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    # Slice with the parameter -- the original overwrote the parameter with
    # the URL string and then read an undefined `max_stories`/`story_ids`.
    story_ids = requests.get(url ).json()[:lowerCAmelCase_]
    # NOTE(review): `get_hackernews_story` must exist at module level; in this
    # file the single-item fetcher was also renamed to `lowerCAmelCase`, so
    # that binding needs to be restored for this call to resolve.
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def lowerCAmelCase ( lowerCAmelCase_ = 10 )-> str:
    """Render the top stories as a Markdown bullet list of ``[title](url)`` links."""
    # NOTE(review): `hackernews_top_stories` must resolve at module level (see
    # the renaming issue flagged on the fetchers above).
    stories = hackernews_top_stories(lowerCAmelCase_ )
    # Format each story dict -- the original unpacked the integer argument
    # (`**lowerCAmelCase_`) instead of the story, which raises TypeError.
    return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories )
if __name__ == "__main__":
    # NOTE(review): every function in this module was renamed to
    # `lowerCAmelCase` by an automated pass, so this name no longer resolves;
    # restore the original `hackernews_top_stories_as_markdown` binding.
    print(hackernews_top_stories_as_markdown())
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Registry mapping each supported model family to its
# (config class, LM model class, tokenizer class) triple, used to look up
# the student and teacher architectures from the CLI `--student_type` /
# `--teacher_type` options.
# NOTE(review): the `List[str]` annotation does not match the dict value --
# presumably a leftover from an automated rename.
_UpperCamelCase: List[str] = {
    """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    """bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
    """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if args.student_type == "roberta":
lowercase : Tuple = False
elif args.student_type == "gpt2":
lowercase : List[str] = False
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if args.student_type == "roberta":
lowercase : List[str] = False
def lowercase__ ( ) -> Tuple:
    """Entry point of the distillation training script.

    Parses the CLI, prepares the output directory, loads the tokenizer, the
    binarized dataset, the student and the teacher, then launches the
    Distiller training loop.

    NOTE(review): an automated rename left most bindings in this body broken
    -- values are assigned to throwaway names (``lowercase``) while later
    statements read the original names (``parser``, ``args``, ``tokenizer``,
    ``special_tok_ids``, ``student``, ``teacher``, ...), and several call
    targets (``sanity_checks``, ``freeze_pos_embeddings``, ...) no longer
    resolve.  The code is documented as written; the name bindings must be
    reconciled before this script can run.
    """
    # ---- CLI definition -------------------------------------------------
    lowercase : Dict = argparse.ArgumentParser(description='Training' )
    parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
    parser.add_argument(
        '--dump_path' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='The output directory (log, checkpoints, parameters, etc.)' )
    parser.add_argument(
        '--data_file' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
    parser.add_argument(
        '--student_type' , type=lowerCAmelCase_ , choices=['distilbert', 'roberta', 'gpt2'] , required=lowerCAmelCase_ , help='The student type (DistilBERT, RoBERTa).' , )
    parser.add_argument('--student_config' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='Path to the student configuration.' )
    parser.add_argument(
        '--student_pretrained_weights' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='Load student initialization checkpoint.' )
    parser.add_argument(
        '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=lowerCAmelCase_ , help='Teacher type (BERT, RoBERTa).' )
    parser.add_argument('--teacher_name' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='The teacher model.' )
    parser.add_argument('--temperature' , default=2.0 , type=lowerCAmelCase_ , help='Temperature for the softmax temperature.' )
    parser.add_argument(
        '--alpha_ce' , default=0.5 , type=lowerCAmelCase_ , help='Linear weight for the distillation loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_mlm' , default=0.0 , type=lowerCAmelCase_ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
    parser.add_argument('--alpha_clm' , default=0.5 , type=lowerCAmelCase_ , help='Linear weight for the CLM loss. Must be >=0.' )
    parser.add_argument('--alpha_mse' , default=0.0 , type=lowerCAmelCase_ , help='Linear weight of the MSE loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_cos' , default=0.0 , type=lowerCAmelCase_ , help='Linear weight of the cosine embedding loss. Must be >=0.' )
    parser.add_argument(
        '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
    parser.add_argument(
        '--mlm_mask_prop' , default=0.1_5 , type=lowerCAmelCase_ , help='Proportion of tokens for which we need to make a prediction.' , )
    parser.add_argument('--word_mask' , default=0.8 , type=lowerCAmelCase_ , help='Proportion of tokens to mask out.' )
    parser.add_argument('--word_keep' , default=0.1 , type=lowerCAmelCase_ , help='Proportion of tokens to keep.' )
    parser.add_argument('--word_rand' , default=0.1 , type=lowerCAmelCase_ , help='Proportion of tokens to randomly replace.' )
    parser.add_argument(
        '--mlm_smoothing' , default=0.7 , type=lowerCAmelCase_ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
    parser.add_argument('--token_counts' , type=lowerCAmelCase_ , help='The token counts in the data_file for MLM.' )
    parser.add_argument(
        '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
    parser.add_argument(
        '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
    parser.add_argument(
        '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
    parser.add_argument('--n_epoch' , type=lowerCAmelCase_ , default=3 , help='Number of pass on the whole dataset.' )
    parser.add_argument('--batch_size' , type=lowerCAmelCase_ , default=5 , help='Batch size (for each process).' )
    parser.add_argument(
        '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=lowerCAmelCase_ , default=50 , help='Gradient accumulation for larger training batches.' , )
    parser.add_argument('--warmup_prop' , default=0.0_5 , type=lowerCAmelCase_ , help='Linear warmup proportion.' )
    parser.add_argument('--weight_decay' , default=0.0 , type=lowerCAmelCase_ , help='Weight decay if we apply some.' )
    parser.add_argument('--learning_rate' , default=5e-4 , type=lowerCAmelCase_ , help='The initial learning rate for Adam.' )
    parser.add_argument('--adam_epsilon' , default=1e-6 , type=lowerCAmelCase_ , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , default=5.0 , type=lowerCAmelCase_ , help='Max gradient norm.' )
    parser.add_argument('--initializer_range' , default=0.0_2 , type=lowerCAmelCase_ , help='Random initialization range.' )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=lowerCAmelCase_ , default='O1' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_gpu' , type=lowerCAmelCase_ , default=1 , help='Number of GPUs in the node.' )
    parser.add_argument('--local_rank' , type=lowerCAmelCase_ , default=-1 , help='Distributed training - Local rank' )
    parser.add_argument('--seed' , type=lowerCAmelCase_ , default=56 , help='Random seed' )
    parser.add_argument('--log_interval' , type=lowerCAmelCase_ , default=5_00 , help='Tensorboard logging interval.' )
    parser.add_argument('--checkpoint_interval' , type=lowerCAmelCase_ , default=40_00 , help='Checkpoint interval.' )
    # ---- Parse and validate ---------------------------------------------
    lowercase : Dict = parser.parse_args()
    sanity_checks(lowerCAmelCase_ )
    # ARGS #
    init_gpu_params(lowerCAmelCase_ )
    set_seed(lowerCAmelCase_ )
    # Only the master process prepares (and may overwrite) the dump directory.
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
                    ' itUse `--force` if you want to overwrite it' )
            else:
                shutil.rmtree(args.dump_path )
        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
        # SAVE PARAMS #
        logger.info(f'''Param: {args}''' )
        with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
            json.dump(vars(lowerCAmelCase_ ) , lowerCAmelCase_ , indent=4 )
        git_log(args.dump_path )
    # Look up student/teacher (config, model, tokenizer) classes.
    lowercase : Dict = MODEL_CLASSES[args.student_type]
    lowercase : Union[str, Any] = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    lowercase : Any = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    lowercase : Dict = {}
    # Map each special-token name to its id for the masking logic.
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        lowercase : Tuple = tokenizer.all_special_tokens.index(lowerCAmelCase_ )
        lowercase : Tuple = tokenizer.all_special_ids[idx]
    logger.info(f'''Special tokens {special_tok_ids}''' )
    lowercase : List[Any] = special_tok_ids
    lowercase : Tuple = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f'''Loading data from {args.data_file}''' )
    with open(args.data_file , 'rb' ) as fp:
        lowercase : Optional[Any] = pickle.load(lowerCAmelCase_ )
    if args.mlm:
        # Smoothed inverse-frequency sampling weights for token masking.
        logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts , 'rb' ) as fp:
            lowercase : List[str] = pickle.load(lowerCAmelCase_ )
        lowercase : int = np.maximum(lowerCAmelCase_ , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            lowercase : Any = 0.0  # do not predict special tokens
        lowercase : List[Any] = torch.from_numpy(lowerCAmelCase_ )
    else:
        lowercase : Optional[int] = None
    lowercase : Union[str, Any] = LmSeqsDataset(params=lowerCAmelCase_ , data=lowerCAmelCase_ )
    logger.info('Data loader created.' )
    # STUDENT #
    logger.info(f'''Loading student config from {args.student_config}''' )
    lowercase : List[Any] = student_config_class.from_pretrained(args.student_config )
    lowercase : str = True
    if args.student_pretrained_weights is not None:
        logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        lowercase : Dict = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase_ )
    else:
        lowercase : List[Any] = student_model_class(lowerCAmelCase_ )
    if args.n_gpu > 0:
        student.to(f'''cuda:{args.local_rank}''' )
    logger.info('Student loaded.' )
    # TEACHER #
    lowercase : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase_ )
    if args.n_gpu > 0:
        teacher.to(f'''cuda:{args.local_rank}''' )
    logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    lowercase : int = Distiller(
        params=lowerCAmelCase_ , dataset=lowerCAmelCase_ , token_probs=lowerCAmelCase_ , student=lowerCAmelCase_ , teacher=lowerCAmelCase_ )
    distiller.train()
    logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
    # NOTE(review): the entry point above was renamed to `lowercase__` by an
    # automated pass, so `main` is unbound here -- running this script raises
    # NameError until the original name is restored.
    main()
| 255 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece fixture used by the slow tokenizer tests.
# NOTE(review): all three module constants below were renamed to the same
# identifier, so only the last assignment survives at runtime; the tests
# reference them as SAMPLE_VOCAB, EN_CODE (250004, the `en_XX` language id)
# and RO_CODE (250020, the `ro_RO` language id).
_UpperCAmelCase : List[str] =get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
_UpperCAmelCase : Optional[int] =25_0004
_UpperCAmelCase : Tuple =25_0020
@require_sentencepiece
@require_tokenizers
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    """MBart tokenizer unit tests (tokenization + slow/fast save/load parity).

    NOTE(review): automated renaming damaged this class -- the four class
    attributes below all share one name (conventionally `tokenizer_class`,
    `rust_tokenizer_class`, `test_rust_tokenizer`, `test_sentencepiece`), the
    mixin base `UpperCAmelCase__` is an unresolved placeholder (it should be
    `TokenizerTesterMixin`), and `__lowercase` stands in for several distinct
    local variables.  The code is documented as written.
    """
    SCREAMING_SNAKE_CASE__ : Tuple = MBartTokenizer
    SCREAMING_SNAKE_CASE__ : Dict = MBartTokenizerFast
    SCREAMING_SNAKE_CASE__ : Tuple = True
    SCREAMING_SNAKE_CASE__ : List[str] = True
    def lowercase_ ( self ) -> Dict:
        """setUp: build a tokenizer from the SentencePiece fixture and save it
        to the temp dir.  NOTE(review): the tokenizer is bound to a throwaway
        local while the next line reads `tokenizer` -- confirm the binding.
        """
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase_ : str = MBartTokenizer(__lowercase , keep_accents=__lowercase )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowercase_ ( self ) -> List[Any]:
        """Check full tokenization: tokens, fairseq-offset ids and round-trips
        (unknown pieces must come back as '<unk>')."""
        lowerCAmelCase_ : Optional[int] = MBartTokenizer(__lowercase , keep_accents=__lowercase )
        lowerCAmelCase_ : Dict = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        lowerCAmelCase_ : Dict = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(
            __lowercase , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowercase )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
    def lowercase_ ( self ) -> Dict:
        """Check slow/fast tokenizer save/load parity in legacy and
        tokenizer.json formats, including special-token preservation."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCAmelCase_ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowerCAmelCase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                lowerCAmelCase_ : int = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                lowerCAmelCase_ : Tuple = tempfile.mkdtemp()
                lowerCAmelCase_ : Union[str, Any] = tokenizer_r.save_pretrained(__lowercase )
                lowerCAmelCase_ : Dict = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCAmelCase_ : str = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Tuple = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : Dict = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=True
                lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : int = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                lowerCAmelCase_ : Tuple = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Optional[int] = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=False
                lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : List[str] = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                lowerCAmelCase_ : Optional[int] = tokenizer_p.save_pretrained(__lowercase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Dict = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__( unittest.TestCase ):
    """Integration tests for the `facebook/mbart-large-en-ro` tokenizer
    (language codes, translation batching, target-text handling).

    NOTE(review): the four class attributes below were all renamed to one
    identifier (conventionally `checkpoint_name`, `src_text`, `tgt_text`,
    `expected_src_tokens`), so only the last survives and the methods' reads
    of `self.src_text` / `cls.checkpoint_name` fail; likewise `setUpClass`
    binds the tokenizer and `pad_token_id = 1` to throwaway locals instead of
    class attributes, and `__lowercase` stands in for several distinct
    values.  The code is documented as written.
    """
    SCREAMING_SNAKE_CASE__ : int = """facebook/mbart-large-en-ro"""
    SCREAMING_SNAKE_CASE__ : int = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    SCREAMING_SNAKE_CASE__ : Optional[int] = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    SCREAMING_SNAKE_CASE__ : str = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
    @classmethod
    def lowercase_ ( cls ) -> Optional[int]:
        """setUpClass: load the en_XX -> ro_RO tokenizer once for all tests."""
        lowerCAmelCase_ : MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        lowerCAmelCase_ : Optional[Any] = 1
        return cls
    def lowercase_ ( self ) -> Optional[Any]:
        """Language codes must map to the documented fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
    def lowercase_ ( self ) -> Tuple:
        """Encoding the first source sentence must reproduce the recorded ids."""
        lowerCAmelCase_ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __lowercase )
    def lowercase_ ( self ) -> Any:
        """Decoding with skip_special_tokens must drop the language code and EOS."""
        self.assertIn(__lowercase , self.tokenizer.all_special_ids )
        lowerCAmelCase_ : Union[str, Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        lowerCAmelCase_ : Tuple = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
        lowerCAmelCase_ : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertNotIn(self.tokenizer.eos_token , __lowercase )
    def lowercase_ ( self ) -> Any:
        """Truncation must keep EOS + language code as the last two ids."""
        lowerCAmelCase_ : Union[str, Any] = ['''this is gunna be a long sentence ''' * 2_0]
        assert isinstance(src_text[0] , __lowercase )
        lowerCAmelCase_ : str = 1_0
        lowerCAmelCase_ : Tuple = self.tokenizer(__lowercase , max_length=__lowercase , truncation=__lowercase ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , __lowercase )
        self.assertEqual(len(__lowercase ) , __lowercase )
    def lowercase_ ( self ) -> int:
        """Mask and language-code tokens must resolve to their fixed ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
    def lowercase_ ( self ) -> Dict:
        """The fairseq vocabulary must survive a save/load round trip."""
        lowerCAmelCase_ : Any = tempfile.mkdtemp()
        lowerCAmelCase_ : int = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__lowercase )
        lowerCAmelCase_ : Optional[Any] = MBartTokenizer.from_pretrained(__lowercase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowercase )
    @require_torch
    def lowercase_ ( self ) -> Union[str, Any]:
        """Batched src/tgt encoding must match the recorded fairseq layout."""
        lowerCAmelCase_ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' )
        lowerCAmelCase_ : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def lowercase_ ( self ) -> List[Any]:
        """Fixed-length batching must produce the expected shapes and reset
        the prefix/suffix special tokens afterwards."""
        lowerCAmelCase_ : str = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        lowerCAmelCase_ : int = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__lowercase , __lowercase )
        self.assertEqual((2, 1_4) , batch.input_ids.shape )
        self.assertEqual((2, 1_4) , batch.attention_mask.shape )
        lowerCAmelCase_ : str = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __lowercase )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def lowercase_ ( self ) -> Optional[int]:
        """Source and target truncation lengths must be applied independently."""
        lowerCAmelCase_ : Optional[Any] = self.tokenizer(self.src_text , padding=__lowercase , truncation=__lowercase , max_length=3 , return_tensors='''pt''' )
        lowerCAmelCase_ : Any = self.tokenizer(
            text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=1_0 , return_tensors='''pt''' )
        lowerCAmelCase_ : int = targets['''input_ids''']
        lowerCAmelCase_ : Optional[Any] = shift_tokens_right(__lowercase , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
    @require_torch
    def lowercase_ ( self ) -> List[str]:
        """Translation inputs must append EOS + src code and force the tgt code."""
        lowerCAmelCase_ : Any = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(__lowercase ) , {
                # A, test, EOS, en_XX
                '''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 2_5_0_0_0_1,
            } , )
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
# Map from canonical checkpoint name to its hosted config URL.
# NOTE(review): both module constants were renamed to `a_` by an automated
# pass, so the logger binding above is clobbered by this dict (and the class
# below reads an unbound `logger`); the conventional names are `logger` and
# `XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP`.
a_ = {
    """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
    """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class _UpperCamelCase ( UpperCAmelCase__ ):
    """Configuration class for XLNet models.

    Stores the model hyper-parameters (vocabulary size, hidden size, memory
    length, summary head options, ...) and validates the head dimension.

    NOTE(review): the base-class name ``UpperCAmelCase__`` is an unresolved
    placeholder -- it should be ``PretrainedConfig``.  The original
    ``__init__`` repeated one parameter name (``a``) for every argument (a
    SyntaxError) and bound each value to a throwaway local instead of
    ``self``; the parameter names below are reconstructed from the body's
    own reads, and the defaults are kept in the original order.
    """

    # The three class attributes were all renamed to one identifier by an
    # automated pass (so only the last survived); their conventional
    # PretrainedConfig names are restored here.
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",  # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=3_2000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Store every hyper-parameter on ``self`` and forward the token ids
        to the base config.  Raises ValueError when ``d_model`` is not a
        multiple of ``n_head`` or when an explicit ``d_head`` disagrees."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            # Honour the legacy `use_cache` alias (the original passed an
            # unresolved placeholder as the warning category).
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    # The property name below is dictated by the `.setter` decorator the
    # original already carried (`@max_position_embeddings.setter`); the
    # getter had been renamed to a mangled identifier, breaking the pairing.
    @property
    def max_position_embeddings(self ):
        """XLNet has no fixed sequence-length limit; -1 signals 'unbounded'."""
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self , value ):
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase ( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None )-> None:
    """Convert every tensor in a saved PyTorch state dict to fp16.

    :param src_path: path of the saved state dict (e.g. pytorch_model.bin).
    :param map_location: device to load the tensors onto while converting.
    :param save_path: where to write the halved state dict; defaults to
        overwriting ``src_path``.
    :raises TypeError: if any entry of the loaded object is not a tensor.

    NOTE(review): the original signature repeated one parameter name three
    times (a SyntaxError) and the body mixed undefined names; the parameter
    names here are reconstructed from how the body uses them.
    """
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
    # NOTE(review): the converter above was renamed to `lowerCAmelCase` by an
    # automated pass, so `convert` is unbound here -- running this script
    # raises NameError until the original name is restored.
    fire.Fire(convert)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class lowerCAmelCase_ :
    """`Translation` feature: each example is a dict mapping a fixed set of
    language codes to a translated string.

    NOTE(review): field names were reconstructed — the original declared all
    five fields under one mangled identifier (so the dataclass had a single
    field and `self.languages` never existed) and passed the undefined name
    `UpperCAmelCase__` as `init`/`repr`. The public class name is kept as-is.
    """

    # Language codes covered by this feature (required).
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='''Translation''' , init=False , repr=False )

    def __call__(self ) -> Union[str, Any]:
        """Arrow schema: one string column per (sorted) language."""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def __magic_name__(self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into one `Value("string")` feature per language."""
        from .features import Value

        return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class lowerCAmelCase_ :
    """`TranslationVariableLanguages` feature: each example holds translations
    for a variable subset of languages, encoded as parallel
    ``language``/``translation`` lists.

    NOTE(review): the original bound `__post_init__` results to locals (the
    attributes were never set) and gave three different methods the same
    mangled name, so only the last survived; field and method names were
    reconstructed accordingly. The public class name is kept as-is.
    """

    # Optional whitelist of allowed language codes (sorted/deduped in __post_init__).
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='''TranslationVariableLanguages''' , init=False , repr=False )

    def __post_init__(self ) -> None:
        # Normalize the language whitelist and cache its size.
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None

    def __call__(self ) -> List[str]:
        """Arrow schema: parallel lists of language codes and translations."""
        return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )

    def encode_example(self , translation_dict ) -> Dict[str, Any]:
        """Encode one example dict (lang -> str or list[str]) into parallel lists."""
        lang_set = set(self.languages ) if self.languages else set()
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                F'''Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )}).''' )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}

    def __magic_name__(self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into `Sequence(Value("string"))` features for both columns."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("""string""" ) ),
            "translation": Sequence(Value("""string""" ) ),
        }
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 262 | 0 |
class lowerCamelCase :
    '''Prefix-sum helper over an integer array: O(1) range sums and
    O(n) contiguous-subarray-sum lookup.

    NOTE(review): the original block was unusable — duplicate parameter names
    (a SyntaxError), two methods sharing one mangled name, and references to
    the undefined name `__lowercase`. Method names were restored to
    `get_sum` / `contains_sum`; the public class name is kept as-is.
    '''

    def __init__( self , _UpperCamelCase ) -> None:
        array = _UpperCamelCase
        len_array = len(array )
        # prefix_sum[i] == sum(array[: i + 1])
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start , end ) -> int:
        """Sum of array[start : end + 1] in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum ) -> bool:
        """True iff some contiguous subarray sums to `target_sum`."""
        # A subarray sum equals prefix[j] - prefix[i]; seed with 0 for prefixes
        # that start at index 0.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 29 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : str ={
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case__( PretrainedConfig ):
    '''Configuration for a ViT MAE model (encoder hyper-parameters plus the
    lightweight MAE decoder and masking options).

    NOTE(review): reconstructed from the obfuscated original, whose signature
    repeated a single mangled parameter name (a SyntaxError), whose assignments
    bound locals instead of `self` attributes, and whose base class was the
    undefined name `UpperCAmelCase__`; `PretrainedConfig` is imported at the
    top of this file. The public class name is kept as-is.
    '''

    model_type = "vit_mae"

    def __init__(
        self ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        image_size=224 ,
        patch_size=16 ,
        num_channels=3 ,
        qkv_bias=True ,
        decoder_num_attention_heads=16 ,
        decoder_hidden_size=512 ,
        decoder_num_hidden_layers=8 ,
        decoder_intermediate_size=2048 ,
        mask_ratio=0.75 ,
        norm_pix_loss=False ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        # Encoder (ViT) hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Patch embedding / input geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # MAE decoder hyper-parameters.
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        # Masked-autoencoding options.
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Make torch/cuDNN kernels deterministic so the pixel-slice assertions below
# are reproducible across runs.
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Fast (CPU/GPU-light) tests for `AltDiffusionImgaImgPipeline` built from tiny dummy components.

    NOTE(review): obfuscation replaced local bindings with `__snake_case` and
    every call argument with the undefined name `__lowercase`, so these tests
    cannot run as written — restore the original argument names before use.
    All methods also share the name `a`, so later definitions shadow earlier
    ones within the class body.
    """

    def a (self : Union[str, Any] ):
        """Free memory between tests (garbage-collect and empty the CUDA cache)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def a (self : str ):
        """Deterministic dummy input image tensor (1x3x32x32)."""
        __snake_case = 1
        __snake_case = 3
        __snake_case = (32, 32)
        __snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowercase )
        return image

    @property
    def a (self : str ):
        """Tiny conditional UNet for fast pipeline construction."""
        torch.manual_seed(0 )
        __snake_case = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        return model

    @property
    def a (self : Any ):
        """Tiny VAE matching the dummy UNet's latent channels."""
        torch.manual_seed(0 )
        __snake_case = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model

    @property
    def a (self : Tuple ):
        """Tiny Roberta-series text encoder used as the pipeline's text encoder."""
        torch.manual_seed(0 )
        __snake_case = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(__lowercase )

    @property
    def a (self : Optional[int] ):
        """Stand-in feature extractor that returns an object with empty `pixel_values`."""
        def extract(*a__ : str , **a__ : Any ):
            class SCREAMING_SNAKE_CASE__ :
                def __init__(self : List[str] ):
                    """Hold an empty pixel-values tensor."""
                    __snake_case = torch.ones([0] )

                def a (self : Tuple , a__ : str ):
                    """Mimic `.to(device)` and return self for chaining."""
                    self.pixel_values.to(__lowercase )
                    return self

            return Out()

        return extract

    def a (self : Dict ):
        """End-to-end img2img on CPU: output and tuple-return paths must match a known slice."""
        __snake_case = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __snake_case = self.dummy_cond_unet
        __snake_case = PNDMScheduler(skip_prk_steps=__lowercase )
        __snake_case = self.dummy_vae
        __snake_case = self.dummy_text_encoder
        __snake_case = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        __snake_case = 77
        __snake_case = self.dummy_image.to(__lowercase )
        __snake_case = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        __snake_case = AltDiffusionImgaImgPipeline(
            unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , )
        __snake_case = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowercase )
        __snake_case = alt_pipe.to(__lowercase )
        alt_pipe.set_progress_bar_config(disable=__lowercase )
        __snake_case = '''A painting of a squirrel eating a burger'''
        __snake_case = torch.Generator(device=__lowercase ).manual_seed(0 )
        __snake_case = alt_pipe(
            [prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowercase , )
        __snake_case = output.images
        __snake_case = torch.Generator(device=__lowercase ).manual_seed(0 )
        __snake_case = alt_pipe(
            [prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowercase , return_dict=__lowercase , )[0]
        __snake_case = image[0, -3:, -3:, -1]
        __snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __snake_case = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3

    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def a (self : int ):
        """The pipeline should run end to end with all components cast to fp16."""
        __snake_case = self.dummy_cond_unet
        __snake_case = PNDMScheduler(skip_prk_steps=__lowercase )
        __snake_case = self.dummy_vae
        __snake_case = self.dummy_text_encoder
        __snake_case = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        __snake_case = 77
        __snake_case = self.dummy_image.to(__lowercase )
        # put models in fp16
        __snake_case = unet.half()
        __snake_case = vae.half()
        __snake_case = bert.half()
        # make sure here that pndm scheduler skips prk
        __snake_case = AltDiffusionImgaImgPipeline(
            unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , )
        __snake_case = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowercase )
        __snake_case = alt_pipe.to(__lowercase )
        alt_pipe.set_progress_bar_config(disable=__lowercase )
        __snake_case = '''A painting of a squirrel eating a burger'''
        __snake_case = torch.manual_seed(0 )
        __snake_case = alt_pipe(
            [prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' , image=__lowercase , ).images
        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def a (self : List[Any] ):
        """Img2img must handle input resolutions divisible by 8 but not 16/32."""
        __snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        __snake_case = init_image.resize((760, 504) )
        __snake_case = '''BAAI/AltDiffusion'''
        __snake_case = AltDiffusionImgaImgPipeline.from_pretrained(
            __lowercase , safety_checker=__lowercase , )
        pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        pipe.enable_attention_slicing()
        __snake_case = '''A fantasy landscape, trending on artstation'''
        __snake_case = torch.manual_seed(0 )
        __snake_case = pipe(
            prompt=__lowercase , image=__lowercase , strength=0.7_5 , guidance_scale=7.5 , generator=__lowercase , output_type='''np''' , )
        __snake_case = output.images[0]
        __snake_case = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        __snake_case = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test for `AltDiffusionImgaImgPipeline` against a reference image.

    NOTE(review): as in the fast-test class above, call arguments were mangled
    to the undefined name `__lowercase` and locals to `__snake_case`; restore
    the original names before running.
    """

    def a (self : Optional[int] ):
        """Free memory between tests (garbage-collect and empty the CUDA cache)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a (self : Optional[int] ):
        """Full BAAI/AltDiffusion img2img run compared to a stored reference output (MAE tolerance)."""
        __snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        __snake_case = init_image.resize((768, 512) )
        __snake_case = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
        __snake_case = '''BAAI/AltDiffusion'''
        __snake_case = AltDiffusionImgaImgPipeline.from_pretrained(
            __lowercase , safety_checker=__lowercase , )
        pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        pipe.enable_attention_slicing()
        __snake_case = '''A fantasy landscape, trending on artstation'''
        __snake_case = torch.manual_seed(0 )
        __snake_case = pipe(
            prompt=__lowercase , image=__lowercase , strength=0.7_5 , guidance_scale=7.5 , generator=__lowercase , output_type='''np''' , )
        __snake_case = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
| 24 |
def lowerCAmelCase ( lowerCAmelCase_ = 10**9 )-> int:
    """Project Euler 94: sum of perimeters of almost-equilateral Heronian
    triangles with perimeter <= `lowerCAmelCase_` (the maximum perimeter).

    Such triangles have sides (a, a, a±1) with integer area; their side
    lengths follow a Pell-like recurrence, generated below. The first
    perimeters are 16, 50, 196, ...

    Fixes vs. the original block: every setup assignment bound the same
    mangled name and the loop referenced five undefined variables, so the
    function raised NameError on the first call.
    """
    max_perimeter = lowerCAmelCase_
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Advance the Pell-like recurrence for the next candidate side length.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
    # The solver in this file is named `lowerCAmelCase`; the original
    # referenced the undefined name `solution`.
    print(f"""{lowerCAmelCase() = }""")
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE_: Optional[Any] =NewType('DataClass', Any)
SCREAMING_SNAKE_CASE_: Dict =NewType('DataClassType', Any)
def lowerCAmelCase_ ( snake_case_ : Union[bool, str] ) -> bool:
    '''Parse a truthy/falsy string (argparse-style) into a bool.

    Accepts booleans unchanged; recognizes yes/no, true/false, t/f, y/n, 1/0
    case-insensitively.

    Raises:
        ArgumentTypeError: for any other value.

    Fixes vs. the original block: the body read the undefined name `v` and
    performed `isinstance(x, x)` instead of checking against `bool`.
    '''
    if isinstance(snake_case_ , bool ):
        return snake_case_
    if snake_case_.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif snake_case_.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"""Truthy value expected: got {snake_case_} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def lowerCAmelCase_ ( snake_case_ : Iterable[Any] ) -> Callable[[str], Any]:
    '''Build an argparse `type=` callable mapping the string form of each
    choice back to the choice itself; unknown strings pass through unchanged.

    Fixes vs. the original block: the comprehension read the undefined names
    `choices`/`str_to_choice` instead of the parameter and the local dict.
    '''
    str_to_choice = {str(choice ): choice for choice in snake_case_}
    return lambda arg: str_to_choice.get(arg , arg )
def lowerCAmelCase_ ( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ) -> dataclasses.Field:
    '''Convenience wrapper around `dataclasses.field` that stores `aliases`
    and `help` into the field metadata (for HfArgumentParser consumption).

    NOTE(review): parameter names reconstructed — the original signature
    repeated one mangled keyword-only name five times (a SyntaxError) and the
    body referenced the resulting undefined names.
    '''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class __A ( UpperCAmelCase__ ):
    """HfArgumentParser-style ArgumentParser: builds CLI arguments from dataclass fields.

    NOTE(review): heavily mangled by obfuscation — the base class
    `UpperCAmelCase__` is undefined (presumably `argparse.ArgumentParser`),
    `__init__` repeats the parameter name `__a` (a SyntaxError), all parsing
    methods share the name `_lowercase` (later definitions shadow earlier
    ones), every result is bound to the same name `UpperCAmelCase_`, and call
    arguments were replaced by the undefined `__lowercase`. The code is kept
    byte-identical here; restore the upstream `transformers.HfArgumentParser`
    names before use.
    """

    # Dataclass types whose fields define the CLI surface.
    a__ : Iterable[DataClassType]

    def __init__(self : Optional[int] , __a : Optional[Any] , **__a : int ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            UpperCAmelCase_ = ArgumentDefaultsHelpFormatter
        super().__init__(**__lowercase )
        if dataclasses.is_dataclass(__lowercase ):
            UpperCAmelCase_ = [dataclass_types]
        UpperCAmelCase_ = list(__lowercase )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(__lowercase )

    # Translate one dataclass field into a parser.add_argument(...) call.
    @staticmethod
    def _lowercase (__a : Dict , __a : str ):
        UpperCAmelCase_ = f"""--{field.name}"""
        UpperCAmelCase_ = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , __lowercase ):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default" )
        UpperCAmelCase_ = kwargs.pop("aliases" , [] )
        if isinstance(__lowercase , __lowercase ):
            UpperCAmelCase_ = [aliases]
        UpperCAmelCase_ = getattr(field.type , "__origin__" , field.type )
        if origin_type is Union or (hasattr(__lowercase , "UnionType" ) and isinstance(__lowercase , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(__lowercase ) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f""" Problem encountered in field '{field.name}'.""" )
            if type(__lowercase ) not in field.type.__args__:
                # filter `str` in Union
                UpperCAmelCase_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                UpperCAmelCase_ = getattr(field.type , "__origin__" , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                UpperCAmelCase_ = (
                    field.type.__args__[0] if isinstance(__lowercase , field.type.__args__[1] ) else field.type.__args__[1]
                )
                UpperCAmelCase_ = getattr(field.type , "__origin__" , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        UpperCAmelCase_ = {}
        if origin_type is Literal or (isinstance(field.type , __lowercase ) and issubclass(field.type , __lowercase )):
            if origin_type is Literal:
                UpperCAmelCase_ = field.type.__args__
            else:
                UpperCAmelCase_ = [x.value for x in field.type]
            UpperCAmelCase_ = make_choice_type_function(kwargs["choices"] )
            if field.default is not dataclasses.MISSING:
                UpperCAmelCase_ = field.default
            else:
                UpperCAmelCase_ = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            UpperCAmelCase_ = copy(__lowercase )
            # Hack because type=bool in argparse does not behave as we want.
            UpperCAmelCase_ = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                UpperCAmelCase_ = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                UpperCAmelCase_ = default
                # This tells argparse we accept 0 or 1 value after --field_name
                UpperCAmelCase_ = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                UpperCAmelCase_ = True
        elif isclass(__lowercase ) and issubclass(__lowercase , __lowercase ):
            UpperCAmelCase_ = field.type.__args__[0]
            UpperCAmelCase_ = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                UpperCAmelCase_ = field.default_factory()
            elif field.default is dataclasses.MISSING:
                UpperCAmelCase_ = True
        else:
            UpperCAmelCase_ = field.type
            if field.default is not dataclasses.MISSING:
                UpperCAmelCase_ = field.default
            elif field.default_factory is not dataclasses.MISSING:
                UpperCAmelCase_ = field.default_factory()
            else:
                UpperCAmelCase_ = True
        parser.add_argument(__lowercase , *__lowercase , **__lowercase )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            UpperCAmelCase_ = False
            parser.add_argument(f"""--no_{field.name}""" , action="store_false" , dest=field.name , **__lowercase )

    # Register every init field of one dataclass type on the parser.
    def _lowercase (self : Tuple , __a : Tuple ):
        if hasattr(__lowercase , "_argument_group_name" ):
            UpperCAmelCase_ = self.add_argument_group(dtype._argument_group_name )
        else:
            UpperCAmelCase_ = self
        try:
            UpperCAmelCase_ = get_type_hints(__lowercase )
        except NameError:
            raise RuntimeError(
                f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)" )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__lowercase ):
                UpperCAmelCase_ = '''.'''.join(map(__lowercase , sys.version_info[:3] ) )
                raise RuntimeError(
                    f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`." ) from ex
            raise
        for field in dataclasses.fields(__lowercase ):
            if not field.init:
                continue
            UpperCAmelCase_ = type_hints[field.name]
            self._parse_dataclass_field(__lowercase , __lowercase )

    # Parse argv (optionally merged with .args files) into dataclass instances.
    def _lowercase (self : List[str] , __a : str=None , __a : int=False , __a : Optional[Any]=True , __a : str=None , __a : Optional[int]=None , ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            UpperCAmelCase_ = []
            if args_filename:
                args_files.append(Path(__lowercase ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                UpperCAmelCase_ = ArgumentParser()
                args_file_parser.add_argument(__lowercase , type=__lowercase , action="append" )
                # Use only remaining args for further parsing (remove the args_file_flag)
                UpperCAmelCase_ = args_file_parser.parse_known_args(args=__lowercase )
                UpperCAmelCase_ = vars(__lowercase ).get(args_file_flag.lstrip("-" ) , __lowercase )
                if cmd_args_file_paths:
                    args_files.extend([Path(__lowercase ) for p in cmd_args_file_paths] )
            UpperCAmelCase_ = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            UpperCAmelCase_ = file_args + args if args is not None else file_args + sys.argv[1:]
        UpperCAmelCase_ = self.parse_known_args(args=__lowercase )
        UpperCAmelCase_ = []
        for dtype in self.dataclass_types:
            UpperCAmelCase_ = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
            UpperCAmelCase_ = {k: v for k, v in vars(__lowercase ).items() if k in keys}
            for k in keys:
                delattr(__lowercase , __lowercase )
            UpperCAmelCase_ = dtype(**__lowercase )
            outputs.append(__lowercase )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(__lowercase )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)

    # Build dataclass instances from an in-memory dict.
    def _lowercase (self : Tuple , __a : int , __a : Tuple = False ):
        UpperCAmelCase_ = set(args.keys() )
        UpperCAmelCase_ = []
        for dtype in self.dataclass_types:
            UpperCAmelCase_ = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
            UpperCAmelCase_ = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            UpperCAmelCase_ = dtype(**__lowercase )
            outputs.append(__lowercase )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__lowercase )}""" )
        return tuple(__lowercase )

    # Build dataclass instances from a JSON file on disk.
    def _lowercase (self : Optional[int] , __a : List[str] , __a : List[Any] = False ):
        with open(Path(__lowercase ) , encoding="utf-8" ) as open_json_file:
            UpperCAmelCase_ = json.loads(open_json_file.read() )
        UpperCAmelCase_ = self.parse_dict(__lowercase , allow_extra_keys=__lowercase )
        return tuple(__lowercase )

    # Build dataclass instances from a YAML file on disk.
    def _lowercase (self : List[str] , __a : Any , __a : str = False ):
        UpperCAmelCase_ = self.parse_dict(yaml.safe_load(Path(__lowercase ).read_text() ) , allow_extra_keys=__lowercase )
        return tuple(__lowercase )
| 1 |
import inspect
import unittest
class snake_case__( unittest.TestCase ):
    '''Checks that every backend referenced by diffusers dummy objects appears
    in the dependency-versions table.

    NOTE(review): both test methods share the obfuscated name `lowercase_`,
    so the second definition shadows the first; names are kept to preserve
    the class interface.
    '''

    def lowercase_ ( self ) -> int:
        # diffusers itself must be importable.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def lowercase_ ( self ) -> List[str]:
        import diffusers
        from diffusers.dependency_versions_table import deps

        # Fix: the original inspected the undefined mangled name `__lowercase`;
        # the module under test is `diffusers`.
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Map backend identifiers to their pip package names.
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f"""{backend} is not in the deps table!"""
'''Lazy import structure for the Nezha model (configuration + PyTorch modeling).

NOTE(review): reconstructed — the original bound every structure to the same
name `a` and then passed the never-defined `_import_structure` to
`_LazyModule`, so importing the package raised NameError.
'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

# The modeling module is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Register the lazy module so attributes import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 56 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( ChineseCLIPImageProcessor ):
    '''Deprecated alias for `ChineseCLIPImageProcessor`; warns on construction.

    NOTE(review): the original subclassed the undefined name `UpperCAmelCase__`
    (the only sensible base is the processor imported above), repeated one
    mangled name for both `*args` and `**kwargs` (a SyntaxError), and passed
    an undefined name where the warning category belongs.
    '''

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """T5Config"""
class SCREAMING_SNAKE_CASE__ ( TFTaModel ):
    """TF MT5 base model: identical architecture to TFT5Model, configured by MTaConfig.

    NOTE(review): the original base was the undefined name `UpperCAmelCase__`;
    `TFTaModel` is assumed from the upstream file's Model /
    ForConditionalGeneration / EncoderModel ordering — confirm.
    """

    model_type = """mt5"""
    config_class = MTaConfig
class SCREAMING_SNAKE_CASE__ ( TFTaForConditionalGeneration ):
    """TF MT5 seq2seq LM head model, configured by MTaConfig.

    NOTE(review): base class reconstructed from the undefined
    `UpperCAmelCase__`; ordering assumed from the upstream file — confirm.
    """

    model_type = """mt5"""
    config_class = MTaConfig
class SCREAMING_SNAKE_CASE__ ( TFTaEncoderModel ):
    """TF MT5 encoder-only model, configured by MTaConfig.

    NOTE(review): base class reconstructed from the undefined
    `UpperCAmelCase__`; ordering assumed from the upstream file — confirm.
    """

    model_type = """mt5"""
    config_class = MTaConfig
| 108 |
def lowerCAmelCase ( lowerCAmelCase_ )-> set:
    """Approximate a minimum vertex cover of an adjacency-list graph via
    maximal matching: repeatedly take an uncovered edge, add both endpoints,
    and discard every edge they touch.

    Fixes vs. the original block: the body bound results to one mangled name
    and then read five undefined variables. Edge extraction is inlined because
    the `get_edges` helper below shares this function's (obfuscated) name and
    shadows it at module level.
    """
    graph = lowerCAmelCase_
    chosen_vertices = set()
    # edges = set of the graph's edges as (from_node, to_node) tuples
    edges = {(from_node, to_node) for from_node, to_nodes in graph.items() for to_node in to_nodes}
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def lowerCAmelCase ( lowerCAmelCase_ )-> set:
    """Return the edge set of an adjacency-list graph as (from_node, to_node)
    tuples.

    Fixes vs. the original block: the accumulator and loop variables were
    mangled, so the body read three undefined names and raised NameError.
    """
    edges = set()
    for from_node, to_nodes in lowerCAmelCase_.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds a tiny random MPNet config plus inputs and checks every task head.

    The previous placeholder names (duplicate ``__UpperCAmelCase`` parameters,
    undefined ``__lowercase``) made this class unusable; the real names are
    restored from the attributes the body assigns and from the call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        # Full-size config of the public checkpoint, for tests needing realistic sizes.
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, seq) -> (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Wires MPNet into the common model and pipeline test suites.

    The undefined base-class placeholders (``UpperCAmelCase__``) and duplicated
    ``lowerCAmelCase__`` attributes are replaced with the mixins imported above
    and the attribute names those mixins actually read.
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """Checks the public microsoft/mpnet-base checkpoint against pinned outputs."""

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n: int) -> list:
    """Return all primes from 2 up to ``n`` via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    # actual sieve of erathostenes: cross out (zero) every multiple
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to ``n`` by testing each candidate with is_prime."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list (with multiplicity)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number in (0, 1):
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number: int) -> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two primes whose sum is ``number`` (Goldbach decomposition)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative ints (Euclid)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple of two positive ints via prime factorization."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared factor: take the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the two given primes (exclusive)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    # NOTE(review): this assert raises IndexError when no prime lies between the
    # arguments (e.g. twin primes) — preserved from the original contract.
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and n) in ascending order."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return (numerator, denominator) reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! for a non-negative int (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Sentencepiece fixture used by the slow-tokenizer tests below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# Language-code token ids in the MBart vocabulary (referenced by the tests below).
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Slow/fast MBart tokenizer tests on the sentencepiece fixture vocab."""

    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    """Integration checks against the public facebook/mbart-large-en-ro checkpoint."""

    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_026, 250_001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3_034, 2, 250_004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            },
        )
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_UpperCAmelCase : Union[str, Any] ="""pt"""
elif is_tf_available():
_UpperCAmelCase : List[Any] ="""tf"""
else:
_UpperCAmelCase : Optional[int] ="""jax"""
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PerceiverTokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def lowercase_ ( self ) -> Optional[int]:
super().setUp()
lowerCAmelCase_ : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ) -> Any:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def lowercase_ ( self , **__lowercase ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase=False , __lowercase=2_0 , __lowercase=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase_ : Optional[Any] = []
for i in range(len(__lowercase ) ):
try:
lowerCAmelCase_ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase_ : List[str] = list(filter(lambda __lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowercase ) )
lowerCAmelCase_ : Optional[int] = list(filter(lambda __lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowercase ) , __lowercase ) )
if max_length is not None and len(__lowercase ) > max_length:
lowerCAmelCase_ : Union[str, Any] = toks[:max_length]
if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
while len(__lowercase ) < min_length:
lowerCAmelCase_ : Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase_ : List[str] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase_ : int = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
if " " not in output_txt and len(__lowercase ) > 1:
lowerCAmelCase_ : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
)
if with_prefix_space:
lowerCAmelCase_ : Any = ''' ''' + output_txt
lowerCAmelCase_ : List[str] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
return output_txt, output_ids
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : List[str] = self.perceiver_tokenizer
lowerCAmelCase_ : Any = '''Unicode €.'''
lowerCAmelCase_ : Dict = tokenizer(__lowercase )
lowerCAmelCase_ : Any = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
lowerCAmelCase_ : str = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]Unicode €.[SEP]''' )
lowerCAmelCase_ : Optional[int] = tokenizer('''e è é ê ë''' )
lowerCAmelCase_ : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
lowerCAmelCase_ : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
lowerCAmelCase_ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase_ : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCAmelCase_ : Optional[int] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
if FRAMEWORK != "jax":
lowerCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowercase , __lowercase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : int = self.perceiver_tokenizer
lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowercase )
self.assertIn('''attention_mask''' , __lowercase )
self.assertNotIn('''decoder_input_ids''' , __lowercase )
self.assertNotIn('''decoder_attention_mask''' , __lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
lowerCAmelCase_ : int = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase_ : List[str] = tokenizer(
text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def lowercase_ ( self ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
shutil.rmtree(__lowercase )
lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Tuple = json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Any = json.load(__lowercase )
lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase_ : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
__lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Any:
pass
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> List[str]:
pass
def lowercase_ ( self ) -> Dict:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
self.assertIsInstance(__lowercase , __lowercase ) | 262 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a : int = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize ``videos`` into a batch of videos: ``[[frame, ...], ...]``.

    Accepts a single image, a single video (list/tuple of frames), or a batch
    of videos, and always returns the doubly-nested batched form.

    Args:
        videos: an image, a list/tuple of images (one video), or a list/tuple
            of such lists (a batch of videos).

    Returns:
        ``List[List[ImageInput]]``: the input wrapped to batch-of-videos shape.

    Raises:
        ValueError: if ``videos`` matches none of the accepted layouts.

    NOTE(review): the original body referenced undefined names
    (``lowerCAmelCase_`` / ``videos``) while the parameter was ``_lowercase``,
    and the class below invokes this helper as ``make_batched`` (not under the
    mangled definition name) — both fixed here.
    """
    # Already batched: list of videos, each a list of valid frames.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    # A single video: wrap once.
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    # A single frame: wrap twice.
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")


# Backward-compatible alias for the original (mangled) function name.
_SCREAMING_SNAKE_CASE = make_batched
class __UpperCamelCase(BaseImageProcessor):
    r"""Frame-wise video image processor.

    Pipeline per frame: optional resize (shortest-edge or explicit HxW) →
    optional center-crop → optional rescale with an optional zero-centering
    offset → optional normalize, then channel-dimension formatting.

    NOTE(review): the original block subclassed an undefined name
    (``UpperCAmelCase__``), declared every parameter with one repeated mangled
    identifier (a SyntaxError), referenced the undefined ``__lowercase`` in
    every body, and named every helper ``__a`` even though ``preprocess`` calls
    ``self.resize`` / ``self.center_crop`` / ``self.rescale`` /
    ``self.normalize`` / ``self._preprocess_image``.  Names are restored to
    what the code itself invokes; behavior is otherwise kept as written.
    """

    # Kept under the original attribute name for backward compatibility.
    lowerCamelCase: List[str] = ["pixel_values"]
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """Store preprocessing defaults; ``size`` defaults to shortest_edge=256,
        ``crop_size`` to 224x224, mean/std to the ImageNet standard values."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: "np.ndarray",
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> "np.ndarray":
        """Resize to ``size`` — either ``{"shortest_edge": n}`` (aspect kept)
        or explicit ``{"height": h, "width": w}``. Raises ValueError otherwise."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: "np.ndarray",
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> "np.ndarray":
        """Center-crop to exactly ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: "np.ndarray",
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by ``scale``; when ``offset`` is set, first
        subtract ``scale / 2`` so the output is shifted toward zero-centred.

        NOTE(review): the subtract-before-rescale order is preserved exactly
        as in the original body — confirm against the intended model recipe.
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: "np.ndarray",
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> "np.ndarray":
        """Channel-wise normalize: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ) -> "np.ndarray":
        """Apply the full per-frame pipeline to one image. All flags are
        assumed already resolved by ``preprocess``."""
        # Fixed precedence bug: the original read
        # `if do_resize and size is None or resample is None:` which raised
        # whenever resample was None even with do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one video or a batch of videos into ``pixel_values``.

        Any argument left as None falls back to the value stored at
        construction time. Returns a :class:`BatchFeature`, optionally
        converted to ``return_tensors``.
        """
        # Resolve every option against the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 105 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class snake_case__:
        """Minimal stand-in defined when PIL is unavailable (the ``else`` branch
        of the ``is_vision_available()`` check above), so module-level references
        do not fail at import time."""

        @staticmethod
        def lowercase_ ( *__lowercase , **__lowercase ) -> Union[str, Any]:
            # Intentionally a no-op: accepts anything, does nothing.
            pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case__( unittest.TestCase ):
    """Pipeline tests for ``zero-shot-object-detection``.

    NOTE(review): the original class named every test method ``lowercase_`` so
    later definitions silently shadowed earlier ones, and the first method
    declared three parameters with the same name (a SyntaxError).  Methods are
    renamed to unique, descriptive names; all expected values are unchanged.
    """

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # Build a tiny pipeline plus one example input for the shared pipeline test.
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        # threshold=0.0 keeps every candidate box, so the output must be non-empty
        # and every entry must carry score/label/box fields of the right types.
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        # Single-image call with an explicit confidence threshold.
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )
        # Batched (list-of-dicts) call must nest the same results one level deeper.
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )
        # Two identical inputs → two identical result lists.
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        # A higher threshold must prune the low-confidence detections.
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        # top_k must cap the number of returned detections.
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ :
def __init__( self : Any, lowerCAmelCase : int, lowerCAmelCase : int=13, lowerCAmelCase : Dict=7, lowerCAmelCase : Optional[int]=True, lowerCAmelCase : List[Any]=True, lowerCAmelCase : Any=True, lowerCAmelCase : Tuple=True, lowerCAmelCase : Union[str, Any]=99, lowerCAmelCase : List[str]=16, lowerCAmelCase : List[Any]=36, lowerCAmelCase : str=6, lowerCAmelCase : Optional[Any]=6, lowerCAmelCase : Optional[int]=6, lowerCAmelCase : List[str]=37, lowerCAmelCase : Optional[Any]="gelu", lowerCAmelCase : Union[str, Any]=0.1, lowerCAmelCase : Optional[Any]=0.1, lowerCAmelCase : Union[str, Any]=512, lowerCAmelCase : Optional[Any]=16, lowerCAmelCase : Dict=2, lowerCAmelCase : int=0.02, lowerCAmelCase : List[Any]=3, lowerCAmelCase : Any=4, lowerCAmelCase : Tuple=None, ) -> Any:
lowercase : Optional[int] = parent
lowercase : Optional[int] = batch_size
lowercase : List[str] = seq_length
lowercase : Union[str, Any] = is_training
lowercase : int = use_input_mask
lowercase : Optional[Any] = use_token_type_ids
lowercase : List[str] = use_labels
lowercase : Tuple = vocab_size
lowercase : Optional[Any] = embedding_size
lowercase : str = hidden_size
lowercase : Tuple = num_hidden_layers
lowercase : Any = num_hidden_groups
lowercase : Tuple = num_attention_heads
lowercase : int = intermediate_size
lowercase : Any = hidden_act
lowercase : str = hidden_dropout_prob
lowercase : Tuple = attention_probs_dropout_prob
lowercase : Union[str, Any] = max_position_embeddings
lowercase : str = type_vocab_size
lowercase : Optional[Any] = type_sequence_label_size
lowercase : Union[str, Any] = initializer_range
lowercase : Optional[int] = num_labels
lowercase : Optional[Any] = num_choices
lowercase : List[str] = scope
def lowercase ( self : Optional[int] ) -> List[Any]:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase : int = None
if self.use_input_mask:
lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Dict = None
if self.use_token_type_ids:
lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase : List[str] = None
lowercase : int = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase : Optional[int] = ids_tensor([self.batch_size], self.num_choices )
lowercase : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Optional[Any] ) -> List[str]:
return AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
def lowercase ( self : str, lowerCAmelCase : List[str], lowerCAmelCase : Optional[int], lowerCAmelCase : List[str], lowerCAmelCase : List[str], lowerCAmelCase : Optional[Any], lowerCAmelCase : str, lowerCAmelCase : Optional[int] ) -> Optional[int]:
lowercase : Optional[Any] = AlbertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase : str = model(__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase )
lowercase : str = model(__lowercase, token_type_ids=__lowercase )
lowercase : str = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def lowercase ( self : Any, lowerCAmelCase : Optional[Any], lowerCAmelCase : Optional[Any], lowerCAmelCase : List[str], lowerCAmelCase : Dict, lowerCAmelCase : Any, lowerCAmelCase : Dict, lowerCAmelCase : List[str] ) -> int:
lowercase : List[str] = AlbertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase : Tuple = model(
__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase, sentence_order_label=__lowercase, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels) )
def lowercase ( self : int, lowerCAmelCase : Optional[Any], lowerCAmelCase : List[Any], lowerCAmelCase : List[str], lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Optional[Any], lowerCAmelCase : Optional[int] ) -> List[Any]:
lowercase : int = AlbertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase : int = model(__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[int], lowerCAmelCase : Dict, lowerCAmelCase : Dict, lowerCAmelCase : Tuple, lowerCAmelCase : Any, lowerCAmelCase : Union[str, Any], lowerCAmelCase : int, lowerCAmelCase : Optional[int] ) -> List[Any]:
lowercase : List[str] = AlbertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase : Dict = model(
__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, start_positions=__lowercase, end_positions=__lowercase, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase ( self : List[Any], lowerCAmelCase : Any, lowerCAmelCase : List[Any], lowerCAmelCase : Dict, lowerCAmelCase : List[str], lowerCAmelCase : Dict, lowerCAmelCase : List[Any], lowerCAmelCase : List[Any] ) -> Any:
lowercase : Dict = self.num_labels
lowercase : Optional[Any] = AlbertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowercase : str = model(__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase ( self : Tuple, lowerCAmelCase : str, lowerCAmelCase : List[Any], lowerCAmelCase : str, lowerCAmelCase : List[Any], lowerCAmelCase : List[Any], lowerCAmelCase : Optional[int], lowerCAmelCase : List[str] ) -> Optional[Any]:
lowercase : Any = self.num_labels
lowercase : Tuple = AlbertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase : Optional[Any] = model(__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : Dict, lowerCAmelCase : Tuple, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : List[Any], lowerCAmelCase : Tuple, lowerCAmelCase : str, lowerCAmelCase : Tuple ) -> Tuple:
lowercase : int = self.num_choices
lowercase : Dict = AlbertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase : str = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase : List[str] = model(
__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase ( self : str ) -> Dict:
lowercase : List[str] = self.prepare_config_and_inputs()
(
lowercase
) : Any = config_and_inputs
lowercase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Albert test suite wiring the model tester into the shared mixins.

    NOTE(review): the three class attributes were all bound to one name
    (``_lowerCamelCase``, so only the last survived) and every method shared
    the name ``lowercase`` while bodies referenced unbound ``__lowercase``
    locals.  Restored the attribute names the inherited mixins read
    (``all_model_classes`` / ``pipeline_model_mapping`` / ``fx_compatible``)
    and conventional ``test_*`` method names -- confirm against the shared
    test mixins and the file's imports (``torch_device``,
    ``MODEL_FOR_PRETRAINING_MAPPING``, ``AlbertConfig``).
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False ):
        """Extend the mixin hook: pretraining models additionally need dummy labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            # NOTE(review): the mapping tested here was obscured; pretraining
            # models are the ones taking sentence_order_label -- confirm.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict

    def setUp(self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # swap the position-embedding flavour on the config produced above
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class a__ ( unittest.TestCase ):
    """Slow integration test checking AlbertModel hidden states against reference values."""

    @slow
    def test_inference_no_head_absolute_embedding(self ):
        # NOTE(review): locals were obfuscated to a single rebound name with the
        # real identifiers left dangling (NameError); restored.  Method renamed
        # from ``lowercase`` so unittest discovery picks it up -- confirm.
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape, expected_shape )
        # fine-grained check on a small slice of the hidden states
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4 ) )
| 255 |
# NOTE(review): ``SquadVaProcessor`` was imported twice on the squad line; the
# duplicate is dropped.  The two occurrences probably stood for two distinct
# processors originally (e.g. a v1 and a v2 variant) -- confirm against the
# ``.squad`` module's exports.
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import argparse
import logging
import pickle
from collections import Counter

# Script: count token occurrences in a binarized MLM dataset and dump a dense
# per-token-id count vector (used to smooth masking probabilities).
#
# NOTE(review): the original bound every value to the single name ``a_`` while
# the rest of the script referenced ``parser``/``args``/``data``/``counter``/
# ``counts``/``logger`` (all unbound, and the counts list was clobbered);
# consistent names restored.

logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # densify: counts[token_id] = occurrences (0 for unseen ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map from canonical ERNIE-M checkpoint names to their hosted config.json URLs.
_UpperCAmelCase : Dict ={
    """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
    """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for ERNIE-M models.

    NOTE(review): the original bound both class attributes to the same name
    (so the attribute map clobbered the model type), gave every ``__init__``
    parameter the same name (a SyntaxError), and assigned every argument to a
    throwaway local instead of ``self`` -- the config would end up empty.
    Restored the conventional ``model_type`` / ``attribute_map`` names read by
    the pretrained-config base class and the documented ERNIE-M defaults --
    confirm against the base class.
    """

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size = 250002,
        hidden_size = 768,
        num_hidden_layers = 12,
        num_attention_heads = 12,
        intermediate_size = 3072,
        hidden_act = "gelu",
        hidden_dropout_prob = 0.1,
        attention_probs_dropout_prob = 0.1,
        max_position_embeddings = 514,
        initializer_range = 0.02,
        pad_token_id = 1,
        layer_norm_eps = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( _snake_case ):
    """Return the argument unchanged if it is already iterable, else duplicate it
    into a 2-tuple (normalises image/patch sizes given as a single int).

    NOTE(review): the def was obfuscated to ``lowercase_`` while the attention
    checks below call ``to_atuple``; renamed to match the call site, and the
    body (which referenced two unbound names) now operates on the parameter.
    """
    if isinstance(_snake_case , collections.abc.Iterable ):
        return _snake_case
    return (_snake_case, _snake_case)


# Backward-compatible alias for the previous (obfuscated) name.
lowercase_ = to_atuple
@require_flax
class lowerCAmelCase_ :
    """Mixin with shared checks for a FlaxVisionTextDualEncoderModel composed
    from a vision sub-model and a text sub-model.  Concrete test classes
    provide the three hooks at the top.

    NOTE(review): the original defined every method under one name
    (``__magic_name__``), repeated parameter names (a SyntaxError) and
    referenced unbound ``__lowercase`` locals.  Method names were restored
    from the internal call sites (``self.check_save_load`` etc.); boolean
    values the obfuscation erased (``from_pt``/``from_flax``/
    ``output_attentions``) are set to the values the surrounding logic
    requires -- confirm against the upstream test file.
    """

    def get_vision_text_model(self , vision_config , text_config ):
        """Hook: return (vision_model, text_model) built from the two configs."""
        pass

    def prepare_config_and_inputs(self ):
        """Hook: return the kwargs dict consumed by the ``check_*`` helpers."""
        pass

    def get_pretrained_model_and_inputs(self ):
        """Hook: return a pretrained dual-encoder model plus matching inputs."""
        pass

    def assert_almost_equals(self , a , b , tol ):
        """Assert that the max elementwise |a - b| does not exceed ``tol``."""
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , F'''Difference between torch and flax is {diff} (>= {tol}).''' )

    def check_model_from_pretrained_configs(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """A model built from a combined config emits projection-sized embeddings."""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = FlaxVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )

    def check_vision_text_dual_encoder_from_pretrained(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """A model assembled from the two sub-models emits projection-sized embeddings."""
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def check_save_load(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """save_pretrained -> from_pretrained round-trip keeps outputs within 1e-3."""
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )

            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-3 )

    def check_vision_text_output_attention(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """output_attentions=True yields per-layer attention maps of the expected shapes."""
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def check_pt_flax_equivalence(self , pt_model , fx_model , inputs_dict ):
        """The PT and Flax variants agree directly and after cross-framework loading."""
        pt_model.to(torch_device )
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs ).to_tuple()

        fx_outputs = fx_model(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4E-2 )

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname )
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )

        fx_outputs_loaded = fx_model_loaded(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs_loaded ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4E-2 )

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname )
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )

        pt_model_loaded.to(torch_device )
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()

        self.assertEqual(len(fx_outputs ) , len(pt_outputs_loaded ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4E-2 )

    def check_equivalence_pt_to_flax(self , vision_config , text_config , inputs_dict ):
        """Convert PT weights into the Flax model and compare outputs."""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )

    def check_equivalence_flax_to_pt(self , vision_config , text_config , inputs_dict ):
        """Load Flax weights into the PT model and compare outputs."""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )

        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )

        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )

    def test_model_from_pretrained_configs(self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )

    def test_vision_text_dual_encoder_from_pretrained(self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )

    def test_save_load(self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )

    def test_vision_text_output_attention(self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self ):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config" )
        text_config = config_inputs_dict.pop("text_config" )

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )

    @slow
    def test_real_model_save_load_from_pretrained(self ):
        model, inputs = self.get_pretrained_model_and_inputs()

        outputs = model(**inputs )
        out_1 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname )
            model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )

            after_outputs = model_loaded(**inputs )
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class lowerCAmelCase_ (UpperCAmelCase__ , unittest.TestCase ):
    """ViT + BERT instantiation of the dual-encoder checks.

    NOTE(review): the three methods were all defined under one name
    (``__magic_name__``), so only the last survived, while the mixin calls
    ``get_pretrained_model_and_inputs`` / ``get_vision_text_model`` /
    ``prepare_config_and_inputs`` -- names restored from those call sites;
    unbound locals likewise restored.  The 2-/4-tuple unpacks assume the
    standard ViT/BERT tester return layouts -- confirm.
    """

    def get_pretrained_model_and_inputs(self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self , vision_config , text_config ):
        vision_model = FlaxViTModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model

    def prepare_config_and_inputs(self ):
        vit_model_tester = FlaxViTModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class lowerCAmelCase_ (UpperCAmelCase__ , unittest.TestCase ):
    """CLIP-vision + BERT instantiation of the dual-encoder checks.

    NOTE(review): method names restored from the mixin's call sites (all three
    were defined under the single name ``__magic_name__``) and unbound locals
    restored.  Tuple unpacks assume the standard CLIP/BERT tester return
    layouts -- confirm.
    """

    def get_pretrained_model_and_inputs(self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self , vision_config , text_config ):
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model

    def prepare_config_and_inputs(self ):
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
    """Slow end-to-end check of the pretrained clip-italian dual encoder."""

    @slow
    def test_inference(self ):
        # NOTE(review): locals were collapsed to one obfuscated name with the
        # real identifiers left unbound; restored.  ``padding=True`` follows
        # from batching two texts of different lengths -- confirm.
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=image , padding=True , return_tensors="np" )

        outputs = model(**inputs )

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )

        expected_logits = np.array([[1.2284727, 0.3104122]] )

        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 ) )
| 25 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__( unittest.TestCase ):
    """Holds image-processor hyper-parameters shared by the MobileNetV1 tests.

    NOTE(review): the original ``__init__`` gave every parameter the same name
    (a SyntaxError) and assigned each value to a throwaway local instead of
    ``self``, so ``prepare_image_processor_dict`` would raise AttributeError;
    the parameter names and ``self.*`` assignments are restored from the
    attributes read below.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
        # normalise the two dict-valued options to their defaults first
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = MobileNetVaImageProcessingTester(self )
@property
def lowercase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowercase , '''size''' ) )
self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowercase , '''crop_size''' ) )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 2_0} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
lowerCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
    def lowercase_ ( self ) -> Tuple:
        # Intentionally a no-op: presumably overrides a mixin test that does not
        # apply to this image processor -- confirm against the saving-test mixin.
        pass
def lowercase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self ) -> Optional[int]:
    """Same shape checks as the PIL test, but with numpy array inputs."""
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # numpify=True asks the helper for np.ndarray inputs — TODO confirm flag
    # values (the original passed undefined names).
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
    for image in image_inputs:
        self.assertIsInstance(image, np.ndarray)
    # Single input -> batch of 1.
    encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ),
    )
    # Batched input keeps the tester's batch size.
    encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ),
    )
def lowercase_ ( self ) -> Any:
    """Same shape checks as the PIL test, but with torch tensor inputs."""
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # torchify=True asks the helper for torch.Tensor inputs — TODO confirm flag
    # values (the original passed undefined names).
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
    for image in image_inputs:
        self.assertIsInstance(image, torch.Tensor)
    # Single input -> batch of 1.
    encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ),
    )
    # Batched input keeps the tester's batch size.
    encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ),
    )
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger (``logging`` is the transformers logging shim imported above).
logger = logging.get_logger(__name__)

# File names the tokenizer reads/writes; referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

# Hub download locations for the published checkpoint's vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# BPE markers: "</w>" terminates a word in the merges vocabulary; "@@ " marks a
# non-final subword in the tokenized output.
BPE_TOKEN_VOCAB = "</w>"
BPE_TOKEN_MERGES = "@@ "
def lowercase__(__snake_case: List[str]):
    """Return the set of adjacent symbol pairs in a word (sequence of symbols)."""
    word = __snake_case
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Name used by the tokenizer class below (its ``get_pairs(...)`` call sites).
get_pairs = lowercase__
# Speech2Text2 has no max input length; this name is consumed by the tokenizer
# class attributes below.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class lowerCamelCase(PreTrainedTokenizer):
    """
    BPE tokenizer for Speech2Text2.

    Built from a ``vocab.json`` file and an optional ``merges.txt``; without a
    merges file the tokenizer can only decode, not encode.
    """

    # Hook attributes consumed by the PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            # Decode-only mode: no BPE ranks, no cache.
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply byte-pair merges to one whitespace token; subwords are joined
        with BPE_TOKEN_MERGES ("@@ ")."""
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        # ``lowercase__`` is this module's get_pairs helper.
        pairs = lowercase__(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = lowercase__(word)
        word = ' '.join(word)
        # Special-case a lone newline token.
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, '')

        word = word.replace(' ', BPE_TOKEN_MERGES)
        self.cache[token] = word
        return word

    def _tokenize(self, text: str) -> List[str]:
        """Split on whitespace and BPE-encode each token (requires merges)."""
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding.'
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.')

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token to its id, falling back to the unknown token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token, falling back to the unknown token."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens, concatenating "@@ "-marked subwords back into words."""
        string = ' '.join(tokens)
        string = ''.join(string.split(BPE_TOKEN_MERGES))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json (and merges.txt when available) into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merges_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, 'w', encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return (vocab_file, merges_file)
| 29 |
from __future__ import annotations
import math
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase ) -> None:
lowerCAmelCase_ : str = size
# approximate the overall size of segment tree with given value
lowerCAmelCase_ : Dict = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowerCAmelCase_ : Dict = [0 for i in range(0 , 4 * size )]
lowerCAmelCase_ : Optional[int] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowercase_ ( self , __lowercase ) -> int:
return idx * 2
def lowercase_ ( self , __lowercase ) -> int:
return idx * 2 + 1
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> None:
if left_element == right_element:
lowerCAmelCase_ : Tuple = a[left_element - 1]
else:
lowerCAmelCase_ : int = (left_element + right_element) // 2
self.build(self.left(__lowercase ) , __lowercase , __lowercase , __lowercase )
self.build(self.right(__lowercase ) , mid + 1 , __lowercase , __lowercase )
lowerCAmelCase_ : Any = max(
self.segment_tree[self.left(__lowercase )] , self.segment_tree[self.right(__lowercase )] )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> bool:
if self.flag[idx] is True:
lowerCAmelCase_ : Union[str, Any] = self.lazy[idx]
lowerCAmelCase_ : Union[str, Any] = False
if left_element != right_element:
lowerCAmelCase_ : Union[str, Any] = self.lazy[idx]
lowerCAmelCase_ : Any = self.lazy[idx]
lowerCAmelCase_ : List[str] = True
lowerCAmelCase_ : Optional[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowerCAmelCase_ : Dict = val
if left_element != right_element:
lowerCAmelCase_ : Union[str, Any] = val
lowerCAmelCase_ : List[Any] = val
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : List[str] = True
return True
lowerCAmelCase_ : Optional[Any] = (left_element + right_element) // 2
self.update(self.left(__lowercase ) , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.update(self.right(__lowercase ) , mid + 1 , __lowercase , __lowercase , __lowercase , __lowercase )
lowerCAmelCase_ : int = max(
self.segment_tree[self.left(__lowercase )] , self.segment_tree[self.right(__lowercase )] )
return True
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int | float:
if self.flag[idx] is True:
lowerCAmelCase_ : Union[str, Any] = self.lazy[idx]
lowerCAmelCase_ : Optional[Any] = False
if left_element != right_element:
lowerCAmelCase_ : List[Any] = self.lazy[idx]
lowerCAmelCase_ : Dict = self.lazy[idx]
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : Optional[int] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowerCAmelCase_ : List[Any] = (left_element + right_element) // 2
lowerCAmelCase_ : Tuple = self.query(self.left(__lowercase ) , __lowercase , __lowercase , __lowercase , __lowercase )
lowerCAmelCase_ : List[Any] = self.query(self.right(__lowercase ) , mid + 1 , __lowercase , __lowercase , __lowercase )
return max(__lowercase , __lowercase )
def __str__( self ) -> str:
return str([self.query(1 , 1 , self.size , __lowercase , __lowercase ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    # Demo: build the tree, run a few range-max queries and lazy updates.
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = snake_case__(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
snake_case_ = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
snake_case_ = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
snake_case_ = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def lowerCamelCase__(preds, labels) -> float:
    """Elementwise accuracy between two label arrays, as a Python float."""
    return float((preds == labels).mean())


# Name used by the other scorers and the metric class below.
simple_accuracy = lowerCamelCase__
def lowerCamelCase__(preds, labels, fa_avg="binary"):
    """Accuracy plus F1 (sklearn ``f1_score`` with the given averaging mode)."""
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


# Name used by the metric class below (called with fa_avg='macro' for 'cb').
acc_and_fa = lowerCamelCase__
def lowerCamelCase__(ids_preds, labels):
    """MultiRC scoring: per-question exact match and macro-F1, plus answer-level F1."""
    # Group (prediction, label) pairs by their paragraph-question id.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average='macro')
        fas.append(fa)
        # Exact match: every answer of the question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}


# Name used by the metric class below.
evaluate_multirc = lowerCamelCase__
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE__(datasets.Metric):
    """SuperGLUE metric: routes each config name to its scoring function."""

    def _info(self):
        """Declare metric features; reject unknown config names early."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            # record/multirc use nested dict features, so no numpy formatting.
            format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None,
        )

    def _get_feature_types(self):
        """Per-config schema of the predictions/references inputs."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "prediction_text": datasets.Value('string'),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "answers": datasets.Sequence(datasets.Value('string')),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value('int64'),
                        "paragraph": datasets.Value('int64'),
                        "question": datasets.Value('int64'),
                    },
                    "prediction": datasets.Value('int64'),
                },
                "references": datasets.Value('int64'),
            }
        else:
            return {
                "predictions": datasets.Value('int64'),
                "references": datasets.Value('int64'),
            }

    def _compute(self, predictions, references):
        """Dispatch to the scorer matching ``self.config_name``."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg='macro')
        elif self.config_name == "record":
            # Re-shape references/predictions into the layout expected by the
            # official ReCoRD evaluation script.
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
| 24 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_UpperCAmelCase : Optional[int] ="""src/transformers"""
_UpperCAmelCase : str ="""docs/source/en"""
_UpperCAmelCase : Optional[int] ="""."""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase_ : int = f.readlines()
# Find the start prompt.
lowerCAmelCase_ : List[Any] = 0
while not lines[start_index].startswith(lowerCAmelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase_ : List[str] = start_index
while not lines[end_index].startswith(lowerCAmelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def lowerCAmelCase(identifier):
    """Split a CamelCased name into its words (acronym runs stay together)."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]


# Name used by get_model_table_from_auto_modules below.
camel_case_split = lowerCAmelCase
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
lowerCAmelCase_ : Tuple = 2 if text == '''✅''' or text == '''❌''' else len(lowerCAmelCase_ )
lowerCAmelCase_ : int = (width - text_length) // 2
lowerCAmelCase_ : Union[str, Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCAmelCase() -> str:
    """Build the markdown model-support table for index.md from the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config', '') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('Tokenizer'):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast'):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name.
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    table = '|' + '|'.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths]) + "|\n"
    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


# Name used by check_model_table below.
get_model_table_from_auto_modules = lowerCAmelCase
def lowerCAmelCase(overwrite=False):
    """Compare the doc-index model table with the freshly generated one.

    When *overwrite* is True, rewrite index.md in place; otherwise raise so
    `make fix-copies` can be suggested to the user.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, 'index.md'),
        start_prompt='<!--This table is updated automatically from the auto modules',
        end_prompt='<!-- End table-->',
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, 'index.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.')


# Name used by the __main__ guard below.
check_model_table = lowerCAmelCase
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
'''simple docstring'''
def lowerCAmelCase_(snake_case_: int) -> bool:
    """Return True if the argument is a perfect cube (negatives included).

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    """
    n = abs(snake_case_)
    # round() guards against float error in the cube root: 27 ** (1 / 3) is
    # 3.0000000000000004, for which a naive ``val * val * val == n`` check fails.
    root = round(n ** (1 / 3))
    return root ** 3 == n


# Public name used by the demo below.
perfect_cube = lowerCAmelCase_
if __name__ == "__main__":
    # Demo: 27 = 3**3 is a perfect cube; 4 is not.
    print(perfect_cube(27))
    print(perfect_cube(4))
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCAmelCase() -> int:
    """Entry point for the ``transformers-cli`` executable: parse the chosen
    sub-command, build it, and run it."""
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run the selected sub-command.
    service = args.func(args)
    service.run()


# Name used by the __main__ guard below.
main = lowerCAmelCase
if __name__ == "__main__":
    # Run the CLI entry point when executed as a script.
    main()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (``logging`` is the transformers logging shim imported above).
logger = logging.get_logger(__name__)

# Config-file locations for the published Funnel Transformer checkpoints.
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class a(PretrainedConfig):
    """Configuration class for Funnel Transformer models.

    Stores block layout, attention/pooling options and the usual transformer
    hyper-parameters; validates ``pooling_type`` and ``attention_type``.
    """

    model_type = "funnel"
    # Standard-name aliases resolved by PretrainedConfig.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        # NOTE(review): mutable default kept for drop-in compatibility with
        # existing callers; it is never mutated here.
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # Default: every block repeated once.
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.')

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # Keep the name bound so the `slow_tokenizer_class = NllbTokenizer`
    # reference below does not raise when sentencepiece is unavailable.
    NllbTokenizer = None

# Module-level logger, used e.g. by `save_vocabulary` below.
logger = logging.get_logger(__name__)
# File names the tokenizer saves/loads; the class below references these
# constants by name, so they must not be bound to a throwaway identifier.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

# Canonical hub locations of the vocab/tokenizer files per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum model input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
# The 202 FLORES-200 language codes understood by the NLLB checkpoints; one
# special token is registered per code.  Referenced by the tokenizer below.
FAIRSEQ_LANGUAGE_CODES = [
    "ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab",
    "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn",
    "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt",
    "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn",
    "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn",
    "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn",
    "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn",
    "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn",
    "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn",
    "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo",
    "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn",
    "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn",
    "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva",
    "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn",
    "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng",
    "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn",
    "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml",
    "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn",
    "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab",
    "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans",
    "zho_Hant", "zul_Latn",
]
# fmt: on
class snake_case__( UpperCAmelCase__ ):
    """Fast (`tokenizers`-backed) NLLB tokenizer.

    Source sequences are tokenized as ``[src_lang_code] X [eos]`` and target
    sequences as ``[tgt_lang_code] X [eos]`` (with ``legacy_behaviour=True``
    the language code goes after the ``eos`` instead).
    """

    # NOTE(review): the original bound every class attribute to one shared
    # obfuscated name, so all but the last were lost; restored to the
    # attribute names the fast-tokenizer base class reads.
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Without the sentencepiece model we cannot re-create a slow tokenizer.
        self.can_save_slow_tokenizer = bool(self.vocab_file)

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source-language code (e.g. ``"eng_Latn"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add the language-code prefix/suffix special tokens around the ids."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """NLLB does not use token type ids, so return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare generate() inputs."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seqaseq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        """Tokenize a batch of source (and optionally target) texts."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-language setting.

        Default: ``prefix=[src_lang_code]``, ``suffix=[eos]``; legacy:
        ``prefix=[]``, ``suffix=[eos, src_lang_code]``.
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset special tokens to the target-language setting (same layout
        as :meth:`set_src_lang_special_tokens` but with the target code)."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """Pipeline tests for `IFInpaintingSuperResolutionPipeline`.

    Uses the shared dummy components from `IFPipelineTesterMixin`; inputs are
    a 16x16 low-res image plus 32x32 original/mask images.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic dummy inputs for the given device/seed."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Super tolerance loosened because the pipeline runs in half precision.
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Type aliases used throughout the parser below: a dataclass *instance* and a
# dataclass *class*.  The original bound both to the same throwaway name,
# leaving the annotations below referencing undefined identifiers.
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v) -> bool:
    """Parse a truthy/falsy string into a bool (argparse ``type=`` callable).

    Booleans pass through unchanged.  Restored to the name referenced by the
    parser below; also fixes the original's ``isinstance(v, v)`` check.

    Raises:
        ArgumentTypeError: if ``v`` is not a recognised truthy/falsy spelling.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Return an argparse ``type=`` callable mapping ``str(choice)`` back to
    the original choice; unknown strings are returned unchanged."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    """Convenience wrapper around :func:`dataclasses.field` that stashes
    ``aliases`` and ``help`` in the field metadata for the argument parser.

    All arguments are keyword-only; ``default``/``default_factory`` behave
    exactly as in :func:`dataclasses.field`.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class snake_case__( ArgumentParser ):
    """An `ArgumentParser` that derives its arguments from dataclass fields.

    NOTE(review): the base class was an undefined obfuscated name; restored to
    `ArgumentParser`, which the methods below require (`parse_known_args`,
    `add_argument_group`, `add_argument`).
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        """Args:
            dataclass_types: one dataclass type, or an iterable of them, whose
                fields become command-line arguments.
            **kwargs: forwarded to ``ArgumentParser``.
        """
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser, field):
        """Register one dataclass ``field`` as an argument on ``parser``."""
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype):
        """Add every init-able field of dataclass ``dtype`` to this parser."""
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        """Parse CLI (or ``args``) into one instance per dataclass type.

        Arguments may also come from ``.args`` files; command-line values
        take precedence over file values.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    # NOTE(review): original built `Path` from the wrong
                    # variable; each listed path must be wrapped individually.
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)

    def parse_dict(self, args, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        """Instantiate the dataclass types from a plain dict of values."""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        """Load a JSON file and feed it through :meth:`parse_dict`."""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        """Load a YAML file and feed it through :meth:`parse_dict`."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """Builds tiny Bit configs/inputs and checks model output shapes.

    NOTE(review): the class was renamed from an obfuscated placeholder to the
    name the test class below actually instantiates (``BitModelTester(self)``),
    and the duplicate-parameter ``__init__`` signature was restored.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per depth entry; read by the hidden-states shape check.
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tiny tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Bit downsamples by a total factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase(UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase):
    """Bit model test suite (restored from mangled source).

    NOTE(review): the two mixin bases were mangled to the undefined name
    ``UpperCAmelCase__`` and every class attribute/method was renamed to a single
    identifier, so later definitions shadow earlier ones — confirm against the
    upstream ``test_modeling_bit.py`` before relying on test discovery.
    """

    lowerCAmelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    lowerCAmelCase__ = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False

    def lowerCamelCase(self):
        # BUG FIX: both helpers were dropped into the same throwaway local;
        # later methods read self.model_tester / self.config_tester.
        self.model_tester = BitModelTester(self)
        # has_text_modality=False is the vision-model convention — confirm upstream.
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def lowerCamelCase(self):
        # Exercises the standard PretrainedConfig round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase(self):
        # Intentional no-op: Bit has no extra common config properties to test.
        return

    @unittest.skip(reason='Bit does not output attentions')
    def lowerCamelCase(self):
        pass

    @unittest.skip(reason='Bit does not use inputs_embeds')
    def lowerCamelCase(self):
        pass

    @unittest.skip(reason='Bit does not support input and output embeddings')
    def lowerCamelCase(self):
        pass

    def lowerCamelCase(self):
        # forward() must accept pixel_values as its first argument.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def lowerCamelCase(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def lowerCamelCase(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def lowerCamelCase(self):
        # Norm layers must be initialized to weight=1 / bias=0.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # BUG FIX: `nn.BatchNormad` does not exist — digits were mangled.
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    def lowerCamelCase(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)  # assumes testing_utils.torch_device is imported above — confirm
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # +1 for the initial embedding output
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason='Bit does not use feedforward chunking')
    def lowerCamelCase(self):
        pass

    def lowerCamelCase(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def lowerCamelCase(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def a__():
    """Load the COCO sample image used by the integration tests below.

    BUG FIX: the original bound the image to a throwaway local and then
    returned the never-defined name ``image``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __lowerCAmelCase(unittest.TestCase):
    """Slow integration test: BitForImageClassification on the COCO cats image.

    NOTE(review): ``.to(__lowercase)`` calls were mangled; restored as
    ``torch_device`` (the transformers testing convention) — confirm the import.
    ``self.default_image_processor`` and ``prepare_img`` refer to names whose
    definitions were also renamed in this file.
    """

    @cached_property
    def lowerCamelCase(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def lowerCamelCase(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@require_torch
class __lowerCAmelCase(UpperCAmelCase__, unittest.TestCase):
    """BitBackbone-specific tests.

    NOTE(review): the base mixin was mangled to the undefined ``UpperCAmelCase__``
    (originally the backbone tester mixin) and the three attributes below were all
    renamed to one identifier, so they shadow each other — confirm upstream names.
    """

    lowerCAmelCase__ = (BitBackbone,) if is_torch_available() else ()
    lowerCAmelCase__ = BitConfig
    lowerCAmelCase__ = False

    def lowerCamelCase(self):
        # BUG FIX: the tester was dropped into a throwaway local; the mixin
        # methods read self.model_tester.
        self.model_tester = BitModelTester(self)
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase(lowerCAmelCase_):
    """argparse factory: build the env command object (args namespace is ignored).

    BUG FIX: the original annotated the return with ``Union`` (never imported,
    NameError at import time) and instantiated ``EnvironmentCommand``, a name
    that no longer exists because the class below was mangled to ``snake_case__``.
    """
    return snake_case__()
class snake_case__(BaseDiffusersCLICommand):
    """`diffusers env` CLI command: print environment info for bug reports.

    BUG FIX: every local in the original was renamed to a throwaway, leaving
    ``download_parser``, ``parser``, ``d`` and all version variables undefined;
    the base class was mangled to the undefined ``UpperCAmelCase__`` (restored
    to ``BaseDiffusersCLICommand``, imported above).
    """

    @staticmethod
    def lowercase_(__lowercase) -> List[Any]:
        # `__lowercase` is the argparse sub-parser collection (mangled name kept).
        download_parser = __lowercase.add_parser('env')
        # `lowerCAmelCase` is this module's env-command factory defined above.
        download_parser.set_defaults(func=lowerCAmelCase)

    def lowercase_(self) -> int:
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        # NOTE(review): all three methods share the mangled name `lowercase_`, so
        # `self.format_dict` no longer resolves to the static formatter below — confirm.
        print(self.format_dict(info))
        return info

    @staticmethod
    def lowercase_(__lowercase) -> str:
        # Render a mapping as "- key: value" lines.
        return "\n".join([f"""- {prop}: {val}""" for prop, val in __lowercase.items()]) + "\n"
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class __A(PretrainedConfig):
    r"""Configuration class for a BERT-generation model.

    BUG FIX: the original declared every ``__init__`` parameter with the same
    name ``_snake_case`` (a SyntaxError), called ``super().__init__`` with the
    undefined ``__lowercase``, and assigned every value to a throwaway local
    instead of ``self``. Parameter names/order restored from the defaults; the
    base class was mangled to the undefined ``UpperCAmelCase__`` (restored to
    ``PretrainedConfig``, imported above).
    """

    # NOTE(review): attribute name was mangled; `model_type` is the
    # PretrainedConfig convention for this identifier — confirm upstream.
    model_type = """bert-generation"""

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case__(unittest.TestCase):
    """Regression tests for JukeboxTokenizer against hard-coded token ids.

    BUG FIX: the two class attributes shared one mangled name (so the tokenizer
    class was shadowed by the metadata dict and ``self.metas`` never resolved),
    and the locals ``tokenizer``, ``tokens`` and ``EXPECTED_OUTPUT`` were all
    renamed to throwaways inside the test bodies. Tensor literals are preserved
    verbatim.
    """

    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
    }

    @require_torch
    def lowercase_(self):
        """The 1b-lyrics tokenizer must reproduce the exact expected ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
                7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
                4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
                4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
                3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
                4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
                4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
                7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
                7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
                6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
                3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
                2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
                3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
                2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
                4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
                4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
                7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
                3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
                4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
                4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
                3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
                4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
                3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
                3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
                4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
                3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
                7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
                4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
                4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
                4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
                4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
                4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
                4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
                2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
                7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
                4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
                7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
                3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
                4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
                4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
                4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
                3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
                4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
                2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
                7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
                7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
                7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                7_6, 7_6]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def lowercase_(self):
        """The 5b-lyrics tokenizer must reproduce the exact expected ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
                3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
                3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
                4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
                7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
                7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
                2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
                3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
                3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
                7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
                7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
                4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
                7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
                7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
                7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
                6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
                4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
                4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
                3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
                3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
                4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
                4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
                4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
                3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
                3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
                4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
                3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
                3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
                4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
                2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
                3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
                3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
                3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
                3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
                4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
                3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
                1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
                1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
                4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
                4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
                4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
                4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
                2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
                3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
                7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
                3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
                4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
                7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
                4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
                7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
                2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
                7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 7_7, 7_7]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a : Dict = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE(preds, labels):
    """Fraction of predictions equal to labels (element-wise numpy comparison).

    BUG FIX: the original declared both parameters with the same name
    ``_lowercase`` (a SyntaxError) while the body read ``preds``/``labels``.
    """
    return (preds == labels).mean()
@dataclass
class __UpperCamelCase:
    """Model/config/tokenizer arguments for fine-tuning.

    BUG FIX: every field was renamed to ``lowerCamelCase`` (so later annotations
    shadowed earlier ones) and optional defaults used the undefined name
    ``UpperCAmelCase__``. Field names are restored from their uses in main()
    (``model_args.model_name_or_path`` etc.); defaults restored to ``None``.
    """

    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
@dataclass
class __UpperCamelCase:
    """Data arguments for the multiple-choice task.

    BUG FIX: fields shared one mangled name and ``overwrite_cache`` defaulted to
    the undefined ``UpperCAmelCase__`` (restored to ``False``). Field names are
    restored from their uses in main() (``data_args.task_name`` etc.).
    """

    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys())})
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""})
    max_seq_length: int = field(
        default=128, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
def _SCREAMING_SNAKE_CASE() -> int:
    """Train/evaluate a multiple-choice model (restored from mangled source).

    BUG FIX: every intermediate (parser, parsed args, processor, config,
    tokenizer, model, datasets, trainer, results, ...) was assigned to a
    throwaway local and then read under its original name (NameError), and
    ``training_args.fpaa`` was a mangling of ``fp16``.
    """
    # NOTE(review): ModelArguments / DataTrainingArguments must resolve to the
    # two dataclasses above (both mangled to `__UpperCamelCase`) — confirm.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # was the mangled attribute `fpaa`
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        # Simple accuracy inlined: the module-level helper's name was mangled away.
        return {"acc": (preds == p.label_ids).mean()}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        results.update(result)
    return results


# Backward-compatible alias: the `__main__` guard and the xla spawn helper call `main()`.
main = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) ->str:
    """Entry point for xla_spawn (TPUs); the argument is the process index and is unused."""
    # NOTE(review): `main` is not defined under that name in this mangled module
    # (the training entry point above was renamed) — confirm the alias exists.
    main()
if __name__ == "__main__":
    # BUG FIX: removed a stray dataset-artifact line ("| 105 |") that followed
    # this guard and was a syntax error.
    main()
from __future__ import annotations
import requests
def lowerCAmelCase(lowerCAmelCase_) -> dict:
    """Return the JSON payload for a single Hacker News item id.

    BUG FIX: the original f-string interpolated the undefined name ``story_id``
    instead of the (mangled) parameter.
    """
    url = f"""https://hacker-news.firebaseio.com/v0/item/{lowerCAmelCase_}.json?print=pretty"""
    return requests.get(url).json()
def lowerCAmelCase(lowerCAmelCase_=10) -> list[dict]:
    """Return up to `lowerCAmelCase_` top stories as JSON dicts.

    BUG FIX: the original sliced with the undefined ``max_stories`` and then
    iterated the never-bound ``story_ids``.
    """
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url).json()[:lowerCAmelCase_]
    # NOTE(review): `get_hackernews_story` was also mangled to `lowerCAmelCase`
    # in this module, so this call resolves to whichever def comes last — confirm.
    return [get_hackernews_story(story_id) for story_id in story_ids]
def lowerCAmelCase(lowerCAmelCase_=10) -> str:
    """Render the top stories as a markdown bullet list.

    BUG FIX: the fetched list was dropped into a shadowing local and then the
    never-bound ``stories`` was iterated; ``.format`` unpacked the wrong name.
    """
    # NOTE(review): `hackernews_top_stories` was mangled to `lowerCAmelCase` too — confirm.
    stories = hackernews_top_stories(lowerCAmelCase_)
    return "\n".join('''* [{title}]({url})'''.format(**story) for story in stories)
if __name__ == "__main__":
    # BUG FIX: removed stray dataset-artifact tokens appended to this line.
    # NOTE(review): `hackernews_top_stories_as_markdown` was the original name of
    # the markdown helper above (mangled to `lowerCAmelCase`) — it may not resolve.
    print(hackernews_top_stories_as_markdown())
"""simple docstring"""
from math import sqrt
def lowercase__(_UpperCAmelCase) -> bool:
    """Trial-division primality test for a non-negative int.

    BUG FIX: the body referenced the undefined ``lowerCAmelCase_`` instead of
    the parameter, and the isinstance checks lost their type arguments.
    """
    number = _UpperCAmelCase
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def lowercase__(_UpperCAmelCase) -> list:
    """Sieve of Eratosthenes: all primes from 2 up to and including n (n > 2).

    BUG FIX: the parameter was never used (body read ``lowerCAmelCase_``) and
    the marking step assigned to a throwaway local instead of the list slot.
    """
    n = _UpperCAmelCase
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowercase__(_UpperCAmelCase) -> list:
    """All primes between 2 and n inclusive, via the primality predicate.

    BUG FIX: appended/tested the undefined ``lowerCAmelCase_`` instead of the
    loop variable. NOTE(review): `is_prime` is the original name of the
    primality function above (mangled to `lowercase__`) — confirm it resolves.
    """
    n = _UpperCAmelCase
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowercase__(_UpperCAmelCase) -> list:
    """Prime factorization of a non-negative int (0 and 1 map to [0]/[1]).

    BUG FIX: the appended values referenced the undefined ``lowerCAmelCase_``;
    restored to the number/factor actually being collected.
    NOTE(review): `is_prime` must resolve to the primality function above.
    """
    number = _UpperCAmelCase
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowercase__(_UpperCAmelCase) -> int:
    """Largest prime factor of `number` (number >= 0).

    BUG FIX: the factor list and result were bound to throwaway locals while the
    code read the never-defined names.
    """
    number = _UpperCAmelCase
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def lowercase__(_UpperCAmelCase) -> int:
    """Smallest prime factor of `number` (number >= 0).

    BUG FIX: same throwaway-local mangling as the greatest-factor twin above.
    """
    number = _UpperCAmelCase
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def lowercase__(_UpperCAmelCase) -> bool:
    """True iff the int is even. BUG FIX: body read the undefined ``number``/
    ``lowerCAmelCase_``; restored to the parameter and a bool type check."""
    number = _UpperCAmelCase
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def lowercase__(_UpperCAmelCase) -> bool:
    """True iff the int is odd. BUG FIX: same undefined-name mangling as is_even."""
    number = _UpperCAmelCase
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def lowercase__(_UpperCAmelCase) -> list:
    """Goldbach decomposition: two primes summing to an even number > 2.

    BUG FIX: all loop state (i, j, loop flag, prime list) was bound to throwaway
    locals while the code read the never-defined names.
    NOTE(review): `is_even` / `get_prime_numbers` / `is_prime` are the original
    names of siblings in this module (all mangled to `lowercase__`) — confirm.
    """
    number = _UpperCAmelCase
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowercase__(number_a, number_b) -> int:
    """Greatest common divisor by Euclid's algorithm.

    BUG FIX: both parameters were declared with the same name ``_UpperCAmelCase``
    (a SyntaxError) and the loop rotated a single throwaway local; restored the
    classic rest/swap Euclid loop.
    """
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a, int) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def lowercase__(number_a, number_b) -> int:
    """Least common multiple via prime factorizations (kgV).

    BUG FIX: both parameters shared one name (SyntaxError) and both factor lists
    collapsed onto the same mangled identifier; restored the original two-list
    algorithm. NOTE(review): `prime_factorization` is the original name of the
    sibling above (mangled to `lowercase__`) — confirm it resolves.
    """
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_a > 1 and number_b > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a)
        prime_fac_b = prime_factorization(number_b)
    elif number_a == 1 or number_b == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a, number_b)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowercase__(n):
    '''Return the n-th prime number, counting from zero (n=0 -> 2, n=1 -> 3, ...).

    The original referenced undefined names (`lowerCAmelCase_`) and relied on a
    sibling `is_prime`; a nested trial-division check makes the function
    self-contained without changing the result.
    '''
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    def _is_prime(candidate):
        # Trial division up to sqrt(candidate); candidate is always >= 2 here.
        if candidate < 2:
            return False
        divisor = 2
        while divisor * divisor <= candidate:
            if candidate % divisor == 0:
                return False
            divisor += 1
        return True

    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not _is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and _is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def lowercase__(p_number_a, p_number_b):
    '''Return every prime strictly between the primes p_number_a and p_number_b.

    Fixes in this revision: duplicate/undefined parameter names, and the final
    sanity assert indexed `ans[0]` / `ans[-1]` which raised IndexError whenever
    no prime lies between the two arguments (e.g. 2 and 3).
    '''

    def _is_prime(candidate):
        # Trial division primality check; all candidates here are >= 2.
        if candidate < 2:
            return False
        divisor = 2
        while divisor * divisor <= candidate:
            if candidate % divisor == 0:
                return False
            divisor += 1
        return True

    assert (
        _is_prime(p_number_a) and _is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then fetch the next prime number.
    while not _is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not _is_prime(number):
            number += 1
    # precondition: the bounds themselves are excluded (guard the empty case).
    assert isinstance(ans, list) and (
        not ans or (ans[0] != p_number_a and ans[-1] != p_number_b)
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowercase__(n):
    '''Return all positive divisors of n, in ascending order (1 and n included).

    The original's parameter was named differently from the `n` its body used;
    the loop is also expressed as a comprehension.
    '''
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = [divisor for divisor in range(1, n + 1) if n % divisor == 0]
    # precondition: a valid divisor list starts at 1 and ends at n itself.
    assert ans[0] == 1 and ans[-1] == n, "Error in function getDivisiors(...)"
    return ans
def lowercase__(number):
    '''Return True iff `number` is a perfect number (equals the sum of its
    proper divisors, e.g. 6 = 1 + 2 + 3).

    The original called a `get_divisors` helper through undefined scrambled
    names; the divisor enumeration is inlined here.
    '''
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = [d for d in range(1, number + 1) if number % d == 0]
    # precondition: sanity-check the divisor list before using it.
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[-1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def lowercase__(numerator, denominator):
    '''Reduce the fraction numerator/denominator to lowest terms.

    Returns the reduced (numerator, denominator) tuple; the sign distribution
    of the inputs is preserved. The original delegated to an undefined `gcd`
    helper with scrambled argument names; Euclid's algorithm is inlined here.
    '''
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    a, b = abs(numerator), abs(denominator)
    while b != 0:
        a, b = b, a % b
    gcd_of_fraction = a
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowercase__(n):
    '''Return n! (factorial); 0! == 1 by convention.

    Fixes the parameter/body name mismatch of the scrambled original.
    '''
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(2, n + 1):  # multiplying by 1 is a no-op, start at 2
        ans *= factor
    return ans
def lowercase__(n):
    '''Return the n-th element of the Fibonacci sequence 1, 1, 2, 3, 5, 8, ...
    indexed from 0 (so both n == 0 and n == 1 yield 1).

    The original's three state variables were scrambled into colliding names;
    this version uses simultaneous assignment with the same iteration count.
    '''
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    ans = 1  # this will be returned
    previous = 1
    for _ in range(n - 1):
        # Advance one Fibonacci step: new ans = ans + previous.
        ans, previous = ans + previous, ans
    return ans
| 255 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the small SentencePiece vocab fixture used to build test tokenizers.
# NOTE(review): the class bodies below reference a scrambled ``__lowercase`` for
# this path -- ``SAMPLE_VOCAB`` follows the usual fixture naming; verify.
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# Fairseq language-code ids read by the integration tests below; the original
# bound all three module constants to the same ``_UpperCAmelCase`` name, so the
# ``EN_CODE`` / ``RO_CODE`` lookups in the test classes could never resolve.
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    '''Common tokenizer test-suite (via the TokenizerTesterMixin base) run
    against the slow ``MBartTokenizer`` and fast ``MBartTokenizerFast``.

    NOTE(review): the method bodies reference ``__lowercase``, ``tokenizer`` and
    ``lowerCAmelCase_`` -- names that are never bound in this file and look like
    mangled fixture/local identifiers; verify against the upstream test module.
    '''

    # Classes and switches consumed by the shared TokenizerTesterMixin machinery.
    SCREAMING_SNAKE_CASE__ : Tuple = MBartTokenizer
    SCREAMING_SNAKE_CASE__ : Dict = MBartTokenizerFast
    SCREAMING_SNAKE_CASE__ : Tuple = True
    SCREAMING_SNAKE_CASE__ : List[str] = True

    def lowercase_ ( self ) -> Dict:
        '''Build a tokenizer from the SentencePiece fixture and save it for reuse.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase_ : str = MBartTokenizer(__lowercase , keep_accents=__lowercase )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowercase_ ( self ) -> List[Any]:
        '''Check tokenization, token->id and id->token round-trips on sample text.'''
        lowerCAmelCase_ : Optional[int] = MBartTokenizer(__lowercase , keep_accents=__lowercase )
        lowerCAmelCase_ : Dict = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        lowerCAmelCase_ : Dict = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(
            __lowercase , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowercase )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    def lowercase_ ( self ) -> Dict:
        '''Save/reload round-trip parity between the fast (rust) and slow
        (python) tokenizers, in default, legacy and non-legacy save formats.'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCAmelCase_ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowerCAmelCase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                lowerCAmelCase_ : int = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                lowerCAmelCase_ : Tuple = tempfile.mkdtemp()
                lowerCAmelCase_ : Union[str, Any] = tokenizer_r.save_pretrained(__lowercase )
                lowerCAmelCase_ : Dict = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCAmelCase_ : str = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Tuple = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : Dict = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=True
                lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : int = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                lowerCAmelCase_ : Tuple = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Optional[int] = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=False
                lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : List[str] = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                lowerCAmelCase_ : Optional[int] = tokenizer_p.save_pretrained(__lowercase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Dict = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__( unittest.TestCase ):
    '''Integration tests for the ``facebook/mbart-large-en-ro`` checkpoint:
    language-code handling, batching, truncation and label shifting.

    NOTE(review): several locals reference ``__lowercase`` / ``lowerCAmelCase_``
    which are never bound in this file -- apparently mangled identifiers from
    the original test module; verify before relying on these tests.
    '''

    # Checkpoint under test plus paired English source / Romanian target texts.
    SCREAMING_SNAKE_CASE__ : int = """facebook/mbart-large-en-ro"""
    SCREAMING_SNAKE_CASE__ : int = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    SCREAMING_SNAKE_CASE__ : Optional[int] = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    # Expected ids for src_text[0], ending with </s> (2) and the EN language code.
    SCREAMING_SNAKE_CASE__ : str = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]

    @classmethod
    def lowercase_ ( cls ) -> Optional[int]:
        '''One-time setup: load the en_XX -> ro_RO tokenizer for all tests.'''
        lowerCAmelCase_ : MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        lowerCAmelCase_ : Optional[Any] = 1
        return cls

    def lowercase_ ( self ) -> Optional[Any]:
        '''Language-code tokens map to their fixed fairseq ids.'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )

    def lowercase_ ( self ) -> Tuple:
        '''Batch encoding of the first source sentence matches the expected ids.'''
        lowerCAmelCase_ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __lowercase )

    def lowercase_ ( self ) -> Any:
        '''Decoding skips special tokens and drops the language-code prefix.'''
        self.assertIn(__lowercase , self.tokenizer.all_special_ids )
        lowerCAmelCase_ : Union[str, Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        lowerCAmelCase_ : Tuple = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
        lowerCAmelCase_ : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertNotIn(self.tokenizer.eos_token , __lowercase )

    def lowercase_ ( self ) -> Any:
        '''Truncation keeps max_length tokens and ends with </s> + lang code.'''
        lowerCAmelCase_ : Union[str, Any] = ['''this is gunna be a long sentence ''' * 2_0]
        assert isinstance(src_text[0] , __lowercase )
        lowerCAmelCase_ : str = 1_0
        lowerCAmelCase_ : Tuple = self.tokenizer(__lowercase , max_length=__lowercase , truncation=__lowercase ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , __lowercase )
        self.assertEqual(len(__lowercase ) , __lowercase )

    def lowercase_ ( self ) -> int:
        '''Mask and language-code tokens resolve to their reserved ids.'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )

    def lowercase_ ( self ) -> Dict:
        '''Vocabulary mapping survives a save/reload round-trip.'''
        lowerCAmelCase_ : Any = tempfile.mkdtemp()
        lowerCAmelCase_ : int = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__lowercase )
        lowerCAmelCase_ : Optional[Any] = MBartTokenizer.from_pretrained(__lowercase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowercase )

    @require_torch
    def lowercase_ ( self ) -> Union[str, Any]:
        '''Label shifting produces decoder inputs in [lang_code, tokens...] order.'''
        lowerCAmelCase_ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' )
        lowerCAmelCase_ : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def lowercase_ ( self ) -> List[Any]:
        '''Full seq2seq batch: shapes, contents and prefix/suffix token state.'''
        lowerCAmelCase_ : str = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        lowerCAmelCase_ : int = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__lowercase , __lowercase )
        self.assertEqual((2, 1_4) , batch.input_ids.shape )
        self.assertEqual((2, 1_4) , batch.attention_mask.shape )
        lowerCAmelCase_ : str = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __lowercase )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )

    def lowercase_ ( self ) -> Optional[int]:
        '''Source and target sides may be truncated to different max lengths.'''
        lowerCAmelCase_ : Optional[Any] = self.tokenizer(self.src_text , padding=__lowercase , truncation=__lowercase , max_length=3 , return_tensors='''pt''' )
        lowerCAmelCase_ : Any = self.tokenizer(
            text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=1_0 , return_tensors='''pt''' )
        lowerCAmelCase_ : int = targets['''input_ids''']
        lowerCAmelCase_ : Optional[Any] = shift_tokens_right(__lowercase , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )

    @require_torch
    def lowercase_ ( self ) -> List[str]:
        '''Translation inputs carry the forced BOS id of the target language.'''
        lowerCAmelCase_ : Any = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(__lowercase ) , {
                # A, test, EOS, en_XX
                '''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 2_5_0_0_0_1,
            } , )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public names re-exported from it.
a_ = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes. The original
    # rebound ``a_`` to this list wholesale, clobbering the configuration
    # entries above instead of adding a key.
    a_["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports the
    # submodules on first attribute access. The original passed an undefined
    # ``_import_structure`` name and bound the proxy to a throwaway variable
    # instead of installing it in ``sys.modules``.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a_)
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''Convert every tensor in a saved state dict at ``src_path`` to float16.

    Overwrites ``src_path`` in place when ``save_path`` is None. Raises
    TypeError if any value in the loaded object is not a ``torch.Tensor``.

    Fixes in this revision: the original declared three parameters with the
    same name (a SyntaxError), and the halved tensor was bound to a local
    instead of being written back into ``state_dict``.
    '''
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    # The original passed an undefined ``convert`` name to fire.Fire.
    fire.Fire(lowerCAmelCase)
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger for this training script.
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
# The entry point below logs through the name ``logger``, which was never
# bound in the original file; alias it here.
logger = UpperCAmelCase__

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class lowerCAmelCase_ :
    """Arguments describing what data to feed the model for pre-training/eval.

    Field names restored from their reads in the training entry point
    (``data_args.dataset_name``, ``.image_column_name``, ``.data_files`` etc.);
    the original declared every field under one scrambled name (so later fields
    shadowed earlier ones) and used an undefined object as the ``None`` default.
    """

    dataset_name: Optional[str] = field(
        default='''cifar10''', metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    image_column_name: Optional[str] = field(
        default=None, metadata={'''help''': '''The column name of the images in the files.'''} )
    train_dir: Optional[str] = field(default=None, metadata={'''help''': '''A folder containing the training data.'''} )
    validation_dir: Optional[str] = field(default=None, metadata={'''help''': '''A folder containing the validation data.'''} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'''help''': '''Percent to split off of train for validation.'''} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def __post_init__(self) -> None:
        """Assemble the ``data_files`` mapping consumed by ``load_dataset``.

        The original named this ``__magic_name__``, so dataclasses never ran it
        and ``data_args.data_files`` (read by the entry point) was unset.
        """
        data_files = {}
        if self.train_dir is not None:
            data_files['''train'''] = self.train_dir
        if self.validation_dir is not None:
            data_files['''val'''] = self.validation_dir
        # None (rather than an empty dict) tells load_dataset to use the hub config.
        self.data_files = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
    """Arguments for which model / config / image processor to pre-train.

    Field names restored from their reads in the training entry point
    (``model_args.model_name_or_path``, ``.config_name``, ``.mask_ratio`` ...);
    the original declared every field under one scrambled name and used an
    undefined object as the ``None`` default.
    """

    model_name_or_path: str = field(
        default=None, metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        } , )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    model_revision: str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    image_processor_name: str = field(default=None, metadata={'''help''': '''Name or path of preprocessor config.'''} )
    # Defaults below follow the upstream run_mae example -- TODO confirm.
    use_auth_token: bool = field(
        default=False, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    mask_ratio: float = field(
        default=0.75, metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
    norm_pix_loss: bool = field(
        default=True, metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class lowerCAmelCase_ (TrainingArguments ):
    """TrainingArguments extended with a base learning rate that the entry
    point scales by total batch size / 256 (``training_args.base_learning_rate``).

    The original inherited from an undefined scrambled name; ``TrainingArguments``
    is imported at the top of this file.
    """

    base_learning_rate: float = field(
        default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def lowercase_(examples):
    """Collate a batch for the Trainer: stack each example's ``pixel_values``
    into a single tensor (MAE pre-training needs no labels).

    Fixes in this revision: the parameter was named differently from the
    ``examples`` the body iterated, and the stacked tensor was bound to a
    scrambled local while an undefined ``pixel_values`` was returned.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def lowercase_ ( ):
    '''Entry point for ViT-MAE pre-training: parse arguments, set up logging,
    load and split the dataset, build config/image-processor/model, apply the
    MAE augmentation pipeline, then train/evaluate with the HF Trainer.

    NOTE(review): many locals below are bound to the scrambled name
    ``SCREAMING_SNAKE_CASE__`` while later lines read the intended names
    (``parser``, ``training_args``, ``ds``, ``config``, ``model`` ...), which
    are never defined -- verify against the upstream run_mae example.
    '''
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    SCREAMING_SNAKE_CASE__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mae""" ,lowerCAmelCase_ ,lowerCAmelCase_ )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    SCREAMING_SNAKE_CASE__ : str = training_args.get_process_log_level()
    logger.setLevel(lowerCAmelCase_ )
    transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    SCREAMING_SNAKE_CASE__ : List[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        SCREAMING_SNAKE_CASE__ : Tuple = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Initialize our dataset.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_dataset(
        data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
    # If we don't have a validation split, split off a percentage of train as validation.
    SCREAMING_SNAKE_CASE__ : Optional[int] = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split ,lowerCAmelCase_ ) and data_args.train_val_split > 0.0:
        SCREAMING_SNAKE_CASE__ : List[str] = ds['''train'''].train_test_split(data_args.train_val_split )
        SCREAMING_SNAKE_CASE__ : Optional[int] = split['''train''']
        SCREAMING_SNAKE_CASE__ : List[str] = split['''test''']
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    SCREAMING_SNAKE_CASE__ : int = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        SCREAMING_SNAKE_CASE__ : Optional[int] = ViTMAEConfig.from_pretrained(model_args.config_name ,**lowerCAmelCase_ )
    elif model_args.model_name_or_path:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path ,**lowerCAmelCase_ )
    else:
        SCREAMING_SNAKE_CASE__ : Any = ViTMAEConfig()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(f'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(f'''New config: {config}''' )
    # adapt config
    config.update(
        {
            """mask_ratio""": model_args.mask_ratio,
            """norm_pix_loss""": model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name ,**lowerCAmelCase_ )
    elif model_args.model_name_or_path:
        SCREAMING_SNAKE_CASE__ : int = ViTImageProcessor.from_pretrained(model_args.model_name_or_path ,**lowerCAmelCase_ )
    else:
        SCREAMING_SNAKE_CASE__ : int = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        SCREAMING_SNAKE_CASE__ : str = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=lowerCAmelCase_ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    else:
        logger.info("""Training new model from scratch""" )
        SCREAMING_SNAKE_CASE__ : str = ViTMAEForPreTraining(lowerCAmelCase_ )
    if training_args.do_train:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = ds['''train'''].column_names
    else:
        SCREAMING_SNAKE_CASE__ : int = ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        SCREAMING_SNAKE_CASE__ : Dict = data_args.image_column_name
    elif "image" in column_names:
        SCREAMING_SNAKE_CASE__ : List[str] = '''image'''
    elif "img" in column_names:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''img'''
    else:
        SCREAMING_SNAKE_CASE__ : List[str] = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor.size['''shortest_edge''']
    else:
        SCREAMING_SNAKE_CASE__ : str = (image_processor.size['''height'''], image_processor.size['''width'''])
    SCREAMING_SNAKE_CASE__ : List[str] = Compose(
        [
            Lambda(lambda _snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(lowerCAmelCase_ ,scale=(0.2, 1.0) ,interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean ,std=image_processor.image_std ),
        ] )

    def preprocess_images(_snake_case ):
        # Apply the MAE augmentation pipeline to every image in the batch.
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = [transforms(lowerCAmelCase_ ) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            SCREAMING_SNAKE_CASE__ : List[str] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(lowerCAmelCase_ )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = (
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(lowerCAmelCase_ )
    # Compute absolute learning rate
    SCREAMING_SNAKE_CASE__ : Tuple = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        SCREAMING_SNAKE_CASE__ : Tuple = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    SCREAMING_SNAKE_CASE__ : Optional[int] = Trainer(
        model=lowerCAmelCase_ ,args=lowerCAmelCase_ ,train_dataset=ds["""train"""] if training_args.do_train else None ,eval_dataset=ds["""validation"""] if training_args.do_eval else None ,tokenizer=lowerCAmelCase_ ,data_collator=lowerCAmelCase_ ,)
    # Training
    if training_args.do_train:
        SCREAMING_SNAKE_CASE__ : str = None
        if training_args.resume_from_checkpoint is not None:
            SCREAMING_SNAKE_CASE__ : Any = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            SCREAMING_SNAKE_CASE__ : Any = last_checkpoint
        SCREAMING_SNAKE_CASE__ : List[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
        trainer.save_model()
        trainer.log_metrics("""train""" ,train_result.metrics )
        trainer.save_metrics("""train""" ,train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        SCREAMING_SNAKE_CASE__ : Tuple = trainer.evaluate()
        trainer.log_metrics("""eval""" ,lowerCAmelCase_ )
        trainer.save_metrics("""eval""" ,lowerCAmelCase_ )
    # Write model card and (optionally) push to hub
    SCREAMING_SNAKE_CASE__ : Optional[int] = {
        '''tasks''': '''masked-auto-encoding''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-auto-encoding'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowerCAmelCase_ )
    else:
        trainer.create_model_card(**lowerCAmelCase_ )
def lowercase_ ( _snake_case ):
    '''TPU entry point used by ``xla_spawn`` (the spawned process index
    argument is accepted but unused).

    NOTE(review): ``main`` is not defined under that name in this file -- the
    entry point above is also called ``lowercase_``; verify the intended target.
    '''
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 262 | 0 |
from __future__ import annotations
import bisect
def lowercase__ ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Return the leftmost index at which ``item`` can be inserted into
    ``sorted_collection`` while keeping it sorted (like ``bisect.bisect_left``).

    :param sorted_collection: ascending-sorted sequence
    :param item: value to locate
    :param lo: lower search bound (inclusive)
    :param hi: upper search bound (exclusive); a negative value means ``len``
    :return: insertion index in ``[lo, hi]``
    """
    # Original code had all four parameters named identically (a SyntaxError)
    # and never bound `hi`; names restored from the body's use-sites.
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def lowercase__ ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Return the rightmost index at which ``item`` can be inserted into
    ``sorted_collection`` while keeping it sorted (like ``bisect.bisect_right``).

    :param sorted_collection: ascending-sorted sequence
    :param item: value to locate
    :param lo: lower search bound (inclusive)
    :param hi: upper search bound (exclusive); a negative value means ``len``
    :return: insertion index in ``[lo, hi]``
    """
    # Parameter/local names restored; `<=` (vs `<` above) makes this the
    # right-bisect variant.
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def lowercase__ ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Insert ``item`` into ``sorted_collection`` in place, before any equal
    items (left-biased insert, like ``bisect.insort_left``).

    :param sorted_collection: ascending-sorted mutable sequence (modified)
    :param item: value to insert
    :param lo: lower search bound (inclusive)
    :param hi: upper search bound (exclusive); a negative value means ``len``
    """
    # The original called an undefined name `bisect_left`; the stdlib `bisect`
    # module (imported at the top of this file) provides the same behavior.
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_left(sorted_collection , item , lo , hi ) , item )
def lowercase__ ( sorted_collection , item , lo = 0 , hi = -1 ):
    """Insert ``item`` into ``sorted_collection`` in place, after any equal
    items (right-biased insert, like ``bisect.insort_right``).

    :param sorted_collection: ascending-sorted mutable sequence (modified)
    :param item: value to insert
    :param lo: lower search bound (inclusive)
    :param hi: upper search bound (exclusive); a negative value means ``len``
    """
    # The original called an undefined name `bisect_right`; use the stdlib
    # `bisect` module (imported at the top of this file) instead.
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_right(sorted_collection , item , lo , hi ) , item )
def lowercase__ ( sorted_collection , item ):
    """Iterative binary search.

    :param sorted_collection: ascending-sorted sequence
    :param item: value to find
    :return: an index of ``item`` in ``sorted_collection``, or ``None``
    """
    # Locals restored: the original bound everything to one throwaway name and
    # then read `left`/`right`/`midpoint`/`current_item`, which were unbound.
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def lowercase__ ( sorted_collection , item ):
    """Binary search via the standard library's ``bisect`` module.

    :param sorted_collection: ascending-sorted sequence
    :param item: value to find
    :return: the leftmost index of ``item``, or ``None`` if absent
    """
    index = bisect.bisect_left(sorted_collection , item )
    # bisect_left returns an insertion point; verify the item is actually there.
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def lowercase__ ( sorted_collection , item , left , right ):
    """Recursive binary search over ``sorted_collection[left:right + 1]``.

    :param sorted_collection: ascending-sorted sequence
    :param item: value to find
    :param left: lower bound (inclusive)
    :param right: upper bound (inclusive)
    :return: an index of ``item``, or ``None`` if absent
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    # Recurse into the half that can still contain `item`.  The original
    # recursed through the undefined name `binary_search_by_recursion`;
    # self-recursion restores the intent.
    if sorted_collection[midpoint] > item:
        return lowercase__(sorted_collection , item , left , midpoint - 1 )
    return lowercase__(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    # Read a comma-separated list, sort it, and search for a single value.
    # Original bound every value to `__UpperCAmelCase` and then read
    # `user_input`/`collection`/`target`/`result` (all unbound) and called an
    # undefined `binary_search`; the stdlib `bisect` search is inlined here.
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    index = bisect.bisect_left(collection, target)
    result = index if index != len(collection) and collection[index] == target else None
    if result is None:
        print(F'{target} was not found in {collection}.')
    else:
        print(F'{target} was found at position {result} in {collection}.')
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
# Map from pretrained checkpoint name to its hosted config.json URL.
# NOTE(review): this rebinds the same mangled name `_UpperCAmelCase`,
# clobbering the logger above — confirm the intended distinct names.
_UpperCAmelCase : str ={
    """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for a ViT-MAE model.

    Stores the encoder hyper-parameters (hidden size, layers, heads, ...),
    the image/patch geometry, and the MAE decoder/masking settings.
    """

    SCREAMING_SNAKE_CASE__ : Optional[Any] = """vit_mae"""

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs ) -> None:
        # Original had every parameter named `__lowercase` (duplicate-argument
        # SyntaxError) and assigned each value to a throwaway local instead of
        # `self`; names restored from the assignment annotations/defaults.
        super().__init__(**kwargs )
        # Encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Image / patch geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # MAE decoder and masking settings.
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 24 |
def lowerCAmelCase ( max_perimeter = 10**9 )-> int:
    """Sum the perimeters of almost-equilateral triangles with integral side
    lengths and integral area whose perimeter does not exceed ``max_perimeter``
    (Project Euler problem 94).

    :param max_perimeter: inclusive perimeter cap
    :return: sum of qualifying perimeters
    """
    # Locals restored: the original bound all five initial values to one
    # throwaway name, leaving `perimeter`/`value`/... unbound in the loop.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Advance the Pell-like recurrence that generates the triangle family.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (n, n, n+1) and (n, n, n-1) perimeter forms.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
    # Original printed `solution()`, which is not defined in this file.
    print(f"""{lowerCAmelCase() = }""")
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __A ( unittest.TestCase ):
    """Tests for SamProcessor (save/load round trip, numpy parity with the
    image processor, and PyTorch mask post-processing).

    The mangled original named every method `_lowercase` (so later defs
    shadowed earlier ones) and never bound `self.tmpdirname`/locals; the
    standard unittest method names and bindings are restored here.
    """

    def setUp(self):
        # Save a default SamProcessor into a fresh temp dir for reloading.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        # Extra image-processor kwargs must survive the save/load round trip.
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # Malformed mask input must raise.
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class __A ( unittest.TestCase ):
    """Tests for SamProcessor with TensorFlow mask post-processing.

    Method names and local bindings restored from the mangled original
    (all methods were named `_lowercase`; locals were never bound).
    """

    def setUp(self):
        # Save a default SamProcessor into a fresh temp dir for reloading.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        # Extra image-processor kwargs must survive the save/load round trip.
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size), return_tensors="tf", )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # Malformed mask input must raise inside TF.
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf")
@require_vision
@require_torchvision
class __A ( unittest.TestCase ):
    """Cross-framework tests: PyTorch and TensorFlow paths of SamProcessor
    must produce numerically identical results.

    Method names and local bindings restored from the mangled original.
    """

    def setUp(self):
        # Save a default SamProcessor into a fresh temp dir for reloading.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt")
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")['pixel_values'].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")['pixel_values'].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")['pixel_values'].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")['pixel_values'].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 1 |
import inspect
import unittest
class snake_case__( unittest.TestCase ):
    """Checks that diffusers imports cleanly and that every backend referenced
    by a dummy object exists in the dependency-versions table.

    The mangled original named both test methods identically (the second
    shadowed the first) and left `all_classes` unbound; restored here.
    """

    def test_diffusers_import(self) -> int:
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self) -> List[str]:
        import diffusers
        from diffusers.dependency_versions_table import deps
        # Every class in the top-level diffusers namespace; dummy placeholder
        # classes record the backends they stand in for.
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Backend identifiers use '_' but the deps table uses '-'.
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f"""{backend} is not in the deps table!"""
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __magic_name__ ( ) -> None:
    '''simple docstring'''
    # Entry point: generate a 1024-bit RSA key pair and write it to
    # rsa_pubkey.txt / rsa_privkey.txt.
    # NOTE(review): `make_key_files` is not defined under that name in this
    # file (the later definitions are all named `__magic_name__`) — confirm
    # the intended call target.
    print('''Making key files...''' )
    make_key_files('''rsa''', 1024 )
    print('''Key files generation successful.''' )
def __magic_name__ ( key_size ) -> tuple[tuple[int, int], tuple[int, int]]:
    """Generate an RSA key pair from two ``key_size``-bit primes.

    :param key_size: bit length of the generated primes/exponent
    :return: ``((n, e), (n, d))`` — the public and private keys
    """
    # Locals restored: the original bound p/q/n/e/d to one throwaway name
    # and then read the real names, which were unbound.
    print('''Generating prime p...''' )
    p = rabinMiller.generate_large_prime(key_size )
    print('''Generating prime q...''' )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' )
    while True:
        # Keep drawing candidate public exponents until one is coprime
        # with phi(n) = (p - 1) * (q - 1).
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size) )
        if cryptoMath.gcd(e, (p - 1) * (q - 1) ) == 1:
            break
    print('''Calculating d that is mod inverse of e...''' )
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def __magic_name__ ( name, key_size ) -> None:
    """Generate a key pair and write it to ``{name}_pubkey.txt`` and
    ``{name}_privkey.txt``; abort if either file already exists.

    :param name: filename prefix for the key files
    :param key_size: bit length passed to the key generator
    """
    # Original had both parameters named `__UpperCAmelCase` (SyntaxError) and
    # never bound `public_key`/`private_key`.
    if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
        print('''\nWARNING:''' )
        print(
            F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    # NOTE(review): `generate_key` is defined above under the mangled name
    # `__magic_name__` — confirm the intended call target.
    public_key, private_key = generate_key(key_size )
    print(F"\nWriting public key to file {name}_pubkey.txt..." )
    with open(F"{name}_pubkey.txt", '''w''' ) as out_file:
        out_file.write(F"{key_size},{public_key[0]},{public_key[1]}" )
    print(F"Writing private key to file {name}_privkey.txt..." )
    with open(F"{name}_privkey.txt", '''w''' ) as out_file:
        out_file.write(F"{key_size},{private_key[0]},{private_key[1]}" )
if __name__ == "__main__":
    main()
| 56 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Deprecated alias kept for backward compatibility; emits a
    FutureWarning and defers to the parent image-processor class."""

    def __init__( self , *args , **kwargs ) -> None:
        # Original declared `*__lowercase, **__lowercase` (duplicate argument
        # name — a SyntaxError) and passed a mangled warning category.
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Deprecated alias kept for backward compatibility; emits a
    FutureWarning and defers to the parent image-processor class."""

    def __init__( self , *args , **kwargs ):
        # Original declared `*snake_case__, **snake_case__` (duplicate argument
        # name — a SyntaxError) and passed a mangled warning category.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
def lowerCAmelCase ( graph )-> set:
    """Approximate a minimum vertex cover via maximal matching: repeatedly
    take an arbitrary edge, add both endpoints to the cover, and discard
    every edge touching them.

    :param graph: adjacency-list dict ``{node: [neighbors]}``
    :return: a set of vertices covering every edge
    """
    chosen_vertices = set()
    # edges = set of the graph's (from_node, to_node) pairs.  The original
    # called `get_edges`, which is not defined under that name in this file,
    # so the extraction is inlined here.
    edges = {(from_node, to_node) for from_node, to_nodes in graph.items() for to_node in to_nodes}
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def lowerCAmelCase ( graph )-> set:
    """Return the set of directed edges ``(from_node, to_node)`` of ``graph``.

    :param graph: adjacency-list dict ``{node: [neighbors]}``
    """
    # `edges` was never bound in the original (assigned to a throwaway name).
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
a_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def a__ ( ):
    """Prompt the user for a compute environment and return the resulting
    configuration object (SageMaker- or cluster-flavored)."""
    # Locals restored: the original bound both values to `__lowerCamelCase`
    # and then read `compute_environment`/`config`, which were unbound.
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' ,['''This machine''', '''AWS (Amazon SageMaker)'''] ,_convert_compute_environment ,)
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def a__ ( subparsers=None ):
    """Build the `accelerate config` argument parser, either standalone or
    attached to an existing subparsers collection.

    :param subparsers: optional argparse subparsers to attach to
    :return: the configured ArgumentParser
    """
    # `a_` is the module-level command description string defined above.
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' ,description=a_ )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' ,description=a_ )
    parser.add_argument(
        '''--config_file''' ,default=None ,help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) ,)
    if subparsers is not None:
        # NOTE(review): `config_command` is defined below under the mangled
        # name `a__` — confirm the intended callback target.
        parser.set_defaults(func=config_command )
    return parser
def a__ ( args ):
    """Collect configuration interactively and persist it as JSON or YAML.

    :param args: parsed CLI namespace (uses ``args.config_file``)
    """
    # NOTE(review): `get_user_input` is defined above under the mangled name
    # `a__` — confirm the intended call target.
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Default location: ensure the accelerate cache dir exists first.
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    # Choose the serialization format from the target file extension.
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"""accelerate configuration saved at {config_file}""" )
def a__ ( ):
    """CLI entry point: parse arguments and run the config command."""
    # NOTE(review): `config_command_parser`/`config_command` are defined above
    # under the mangled name `a__` — confirm the intended call targets.
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
    main()
| 330 |
from math import sqrt
def lowerCAmelCase ( number )-> bool:
    """Trial-division primality test.

    :param number: non-negative integer to test
    :return: True if ``number`` is prime
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def lowerCAmelCase ( n )-> list:
    """Sieve of Eratosthenes: primes from 2 to ``n`` inclusive.

    :param n: upper bound, must be > 2
    :return: list of primes
    """
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a surviving value.
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( n )-> list:
    """Primes from 2 to ``n`` inclusive, found by per-number primality tests.

    :param n: upper bound, must be > 2
    :return: list of primes
    """
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    # NOTE(review): `is_prime` is defined above under the mangled name
    # `lowerCAmelCase` — confirm the intended call target.
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( number )-> list:
    """Prime factorization of ``number`` (with multiplicity).

    :param number: non-negative integer
    :return: list of prime factors (``[0]``/``[1]`` for 0/1)
    """
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( number )-> int:
    """Greatest prime factor of ``number``.

    :param number: non-negative integer
    :return: largest entry of its prime factorization
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number' (mangled sibling, defined above).
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( number )-> int:
    """Smallest prime factor of ``number``.

    :param number: non-negative integer
    :return: smallest entry of its prime factorization
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number' (mangled sibling, defined above).
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( number )-> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare bust been from type bool"
    return number % 2 == 0
def lowerCAmelCase ( number )-> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare bust been from type bool"
    return number % 2 != 0
def lowerCAmelCase ( number )-> list:
    """Goldbach decomposition: find two primes summing to an even ``number``.

    :param number: even integer > 2
    :return: ``[p, q]`` with ``p + q == number`` and both prime
    """
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowerCAmelCase ( number_1 , number_2 )-> int:
    """Greatest common divisor via the Euclidean algorithm.

    :param number_1: non-negative integer
    :param number_2: non-negative integer
    :return: gcd of the two numbers
    """
    # Original had both parameters named identically (SyntaxError) and
    # collapsed the two operands into one local.
    assert (
        isinstance(number_1 , int )
        and isinstance(number_2 , int )
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1 , int ) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1
def lowerCAmelCase ( number_1 , number_2 )-> int:
    """Least common multiple computed from the prime factorizations.

    :param number_1: positive integer
    :param number_2: positive integer
    :return: lcm of the two numbers
    """
    # Original had duplicate parameter names (SyntaxError) and collapsed the
    # two factor lists / counters into single locals.
    assert (
        isinstance(number_1 , int )
        and isinstance(number_2 , int )
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        # NOTE(review): `prime_factorization` is defined above under the
        # mangled name `lowerCAmelCase` — confirm the intended call target.
        prime_fac_1 = prime_factorization(number_1 )
        prime_fac_2 = prime_factorization(number_2 )
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1 , number_2 )
    count_1 = 0
    count_2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared prime: take the higher multiplicity.
                count_1 = prime_fac_1.count(n )
                count_2 = prime_fac_2.count(n )
                for _ in range(max(count_1 , count_2 ) ):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n )
                for _ in range(count_1 ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n )
            for _ in range(count_2 ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return the n-th prime number, 0-based (n == 0 gives 2).

    Relies on the module-level ``is_prime`` helper.

    Fixes the obfuscation-broken precondition ``isinstance(n, n)``, which
    raised TypeError on every call.
    """
    n = lowerCAmelCase_
    assert isinstance(n , int ) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then run on to the next prime number
        while not is_prime(ans ):
            ans += 1
    # postcondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def lowerCAmelCase ( p_number_a , p_number_b )-> list:
    """Return every prime strictly between the primes p_number_a < p_number_b.

    Relies on the module-level ``is_prime`` helper.

    Fixes: duplicated parameter names (a SyntaxError), a wrong
    ``List[Any]`` return annotation, and a postcondition that raised
    IndexError when no prime lies between the bounds (e.g. 2 and 3).
    """
    assert (
        is_prime(p_number_a ) and is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returned
    # advance to the first prime after p_number_a
    while not is_prime(number ):
        number += 1
    while number < p_number_b:
        ans.append(number )
        number += 1
        # fetch the next prime number
        while not is_prime(number ):
            number += 1
    # postcondition: the bounds themselves are never included
    assert isinstance(ans , list ) and (
        not ans or (ans[0] != p_number_a and ans[-1] != p_number_b)
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Return all positive divisors of the argument (including 1 and n), ascending.

    Fixes the obfuscation-broken precondition ``isinstance(n, n)`` (TypeError
    on every call) and the wrong ``-> Tuple`` annotation (a list is returned).
    """
    n = lowerCAmelCase_
    assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
    # collect every divisor by trial division
    ans = [divisor for divisor in range(1 , n + 1 ) if n % divisor == 0]
    # postcondition: 1 and n always divide n
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True iff the argument is a perfect number (sum of its proper divisors).

    Relies on the module-level ``get_divisors`` helper.

    Fixes the obfuscation-broken ``isinstance(x, x)`` checks and the wrong
    ``List[Any]`` return annotation (a bool is returned).
    """
    number = lowerCAmelCase_
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number )
    # precondition on the helper's contract: sorted, starts at 1, ends at number
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def lowerCAmelCase ( numerator , denominator )-> tuple:
    """Return numerator/denominator reduced to lowest terms as a (num, den) tuple.

    Relies on the module-level ``gcd`` helper.

    Fixes: duplicated parameter names (a SyntaxError — the body already read
    ``numerator``/``denominator``) and the wrong ``Union[str, Any]`` return
    annotation.
    """
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition on the helper's contract
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return n! for a non-negative integer (0! == 1).

    Fixes the obfuscation-broken precondition ``isinstance(n, n)`` (TypeError
    on every call) and the wrong ``-> Tuple`` annotation.
    """
    n = lowerCAmelCase_
    assert isinstance(n , int ) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return the n-th Fibonacci number, with fib(1) == fib(2) == 1.

    Note: for n == 0 the loop body never runs and 1 is returned, matching the
    original ``range(n - 1)`` bound.  Fixes the obfuscation-broken
    precondition ``isinstance(n, n)`` and drops an unused local that the
    original assigned but never read.
    """
    n = lowerCAmelCase_
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    previous = 0
    ans = 1  # this will be returned
    for _ in range(n - 1 ):
        # advance the (previous, current) Fibonacci pair one step
        previous, ans = ans, ans + previous
    return ans
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : Dict ,_snake_case : int=7 ,_snake_case : Optional[int]=3 ,_snake_case : Optional[Any]=18 ,_snake_case : Dict=30 ,_snake_case : Any=400 ,_snake_case : Any=True ,_snake_case : int=None ,_snake_case : Optional[Any]=True ,_snake_case : Any=None ,) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = size if size is not None else {'''shortest_edge''': 20}
lowercase__ : Any = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ : Any = parent
lowercase__ : Any = batch_size
lowercase__ : Optional[int] = num_channels
lowercase__ : Tuple = image_size
lowercase__ : List[str] = min_resolution
lowercase__ : Dict = max_resolution
lowercase__ : Tuple = do_resize
lowercase__ : Optional[Any] = size
lowercase__ : Union[str, Any] = do_center_crop
lowercase__ : Optional[Any] = crop_size
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( UpperCAmelCase__ ,unittest.TestCase ):
    '''Shape and attribute tests for the MobileNetV1 image processor.

    NOTE(review): automated renaming has mangled this class.  Assignment
    targets became throwaway locals (``lowercase__``) while later lines still
    read the original names (``self.image_processor_tester``,
    ``image_processor``, ``image_processing``, ``encoded_images``,
    ``__lowercase``), the helper class it instantiates was itself renamed,
    and this class shadows the tester class defined above (both are ``__A``).
    Compare against the original transformers test before trusting behavior.
    '''

    lowerCAmelCase : Dict = MobileNetVaImageProcessor if is_vision_available() else None

    # setUp: build the helper that supplies the processor kwargs
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        """Create the kwargs-producing tester helper."""
        lowercase__ : Union[str, Any] = MobileNetVaImageProcessingTester(self )

    @property
    def UpperCAmelCase ( self : List[str] ) -> Dict:
        """Expose the tester's processor kwargs dict."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase ( self : int ) -> List[Any]:
        """The processor must expose the standard resize/crop config attributes."""
        lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase ,'''do_resize''' ) )
        self.assertTrue(hasattr(__lowercase ,'''size''' ) )
        self.assertTrue(hasattr(__lowercase ,'''do_center_crop''' ) )
        self.assertTrue(hasattr(__lowercase ,'''crop_size''' ) )

    def UpperCAmelCase ( self : Dict ) -> int:
        """``from_dict`` must honor defaults and explicit size/crop_size overrides."""
        lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size ,{'''height''': 18, '''width''': 18} )
        lowercase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )

    def UpperCAmelCase ( self : Tuple ) -> Tuple:
        """Intentionally a no-op placeholder."""
        pass

    def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        """PIL inputs: unbatched and batched calls must yield (N, C, crop_h, crop_w)."""
        lowercase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase ,Image.Image )
        # Test not batched input
        lowercase__ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        lowercase__ : Tuple = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """NumPy inputs: same shape contract as the PIL variant."""
        lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase ,numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase ,np.ndarray )
        # Test not batched input
        lowercase__ : Any = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        lowercase__ : Tuple = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

    def UpperCAmelCase ( self : Optional[Any] ) -> Any:
        """PyTorch inputs: same shape contract as the PIL variant."""
        lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase ,torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase ,torch.Tensor )
        # Test not batched input
        lowercase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        lowercase__ : Dict = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
| 16 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework string passed as `return_tensors` in the tests
# below; preference order is PyTorch, then TensorFlow, then JAX.
# NOTE(review): the test body reads this value as `FRAMEWORK` (see the
# padding test), but renaming has changed the variable name here — verify
# against the original file.
if is_torch_available():
    _UpperCAmelCase : Union[str, Any] ="""pt"""
elif is_tf_available():
    _UpperCAmelCase : List[Any] ="""tf"""
else:
    _UpperCAmelCase : Optional[int] ="""jax"""
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    '''Tokenizer tests for Perceiver (byte-level tokenization).

    NOTE(review): automated renaming has mangled this class — every method is
    now named ``lowercase_`` (so only the last definition survives at class
    creation), assignment targets became throwaway locals (``lowerCAmelCase_``)
    while later lines read the original names (``tokenizer``, ``encoded``,
    ``batch``, ``targets``, ``toks``, ...), and several signatures repeat the
    same parameter name.  Compare against the original transformers test file
    before trusting behavior; the documentation below describes the intent of
    each method as written.
    '''

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : Optional[Any] = False

    # setUp: save a fresh tokenizer into the temp dir used by the mixin
    def lowercase_ ( self ) -> Optional[int]:
        super().setUp()
        lowerCAmelCase_ : str = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    # cached accessor for the published pretrained tokenizer
    @cached_property
    def lowercase_ ( self ) -> Any:
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )

    # reload a tokenizer from the temp dir, forwarding extra kwargs
    def lowercase_ ( self , **__lowercase ) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )

    def lowercase_ ( self , __lowercase , __lowercase=False , __lowercase=2_0 , __lowercase=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        lowerCAmelCase_ : Optional[Any] = []
        for i in range(len(__lowercase ) ):
            try:
                lowerCAmelCase_ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )
            except UnicodeDecodeError:
                # skip byte values that are not valid UTF-8 on their own
                pass
            toks.append((i, tok) )
        # keep only plain ASCII-letter tokens that round-trip through encode
        lowerCAmelCase_ : List[str] = list(filter(lambda __lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowercase ) )
        lowerCAmelCase_ : Optional[int] = list(filter(lambda __lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowercase ) , __lowercase ) )
        if max_length is not None and len(__lowercase ) > max_length:
            lowerCAmelCase_ : Union[str, Any] = toks[:max_length]
        if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
            while len(__lowercase ) < min_length:
                lowerCAmelCase_ : Union[str, Any] = toks + toks
        # toks_str = [t[1] for t in toks]
        lowerCAmelCase_ : List[str] = [t[0] for t in toks]
        # Ensure consistency
        lowerCAmelCase_ : int = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
        if " " not in output_txt and len(__lowercase ) > 1:
            lowerCAmelCase_ : Optional[int] = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
            )
        if with_prefix_space:
            lowerCAmelCase_ : Any = ''' ''' + output_txt
        lowerCAmelCase_ : List[str] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        return output_txt, output_ids

    # multibyte characters must encode to the expected byte IDs and round-trip
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : List[str] = self.perceiver_tokenizer
        lowerCAmelCase_ : Any = '''Unicode €.'''
        lowerCAmelCase_ : Dict = tokenizer(__lowercase )
        lowerCAmelCase_ : Any = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded['''input_ids'''] , __lowercase )
        # decoding
        lowerCAmelCase_ : str = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , '''[CLS]Unicode €.[SEP]''' )
        lowerCAmelCase_ : Optional[int] = tokenizer('''e è é ê ë''' )
        lowerCAmelCase_ : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded['''input_ids'''] , __lowercase )
        # decoding
        lowerCAmelCase_ : int = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , '''[CLS]e è é ê ë[SEP]''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )

    # batch padding must produce the expected IDs and (2, 38) shapes
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Any = self.perceiver_tokenizer
        lowerCAmelCase_ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        lowerCAmelCase_ : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        lowerCAmelCase_ : Optional[int] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
        self.assertIsInstance(__lowercase , __lowercase )
        if FRAMEWORK != "jax":
            lowerCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
        else:
            lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(__lowercase , __lowercase )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )

    # encoder-only model: no decoder_* keys may be returned
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : int = self.perceiver_tokenizer
        lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , __lowercase )
        self.assertIn('''attention_mask''' , __lowercase )
        self.assertNotIn('''decoder_input_ids''' , __lowercase )
        self.assertNotIn('''decoder_attention_mask''' , __lowercase )

    # target texts padded to max_length must come back with width 32
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
        lowerCAmelCase_ : int = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        lowerCAmelCase_ : List[str] = tokenizer(
            text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
        self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )

    # save/reload round trip, including added tokens and model_max_length
    def lowercase_ ( self ) -> Optional[Any]:
        # safety check on max_len default value so we are sure the test works
        lowerCAmelCase_ : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
                lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                tokenizer.save_pretrained(__lowercase )
                lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
                lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                self.assertListEqual(__lowercase , __lowercase )
                shutil.rmtree(__lowercase )
        lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
                lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                tokenizer.save_pretrained(__lowercase )
                lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
                lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                self.assertListEqual(__lowercase , __lowercase )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(__lowercase )

    # additional_special_tokens from the saved JSON configs must be honored
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : List[Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__lowercase )
                with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCAmelCase_ : Tuple = json.load(__lowercase )
                with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCAmelCase_ : Any = json.load(__lowercase )
                lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
                lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                lowerCAmelCase_ : Any = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__lowercase , __lowercase )
                with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__lowercase , __lowercase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
                    __lowercase , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
                lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
                    __lowercase , additional_special_tokens=__lowercase , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )

    # ID 178 is an invalid lone UTF-8 byte and must decode to the replacement char
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : Any = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )

    # the following overrides disable common tests that do not apply to Perceiver
    def lowercase_ ( self ) -> Tuple:
        pass

    def lowercase_ ( self ) -> Any:
        pass

    def lowercase_ ( self ) -> Tuple:
        pass

    def lowercase_ ( self ) -> List[str]:
        pass

    def lowercase_ ( self ) -> Dict:
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase )
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Dict = None , _lowercase : str = None ) ->None:
'''simple docstring'''
if start is None:
a : int = 0
if end is None:
a : List[Any] = len(lowerCAmelCase_ ) - 1
if start >= end:
return
a : str = (start + end) // 2
slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ )
if sequence[end] < sequence[mid]:
a : int = sequence[mid], sequence[end]
slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 )
if __name__ == "__main__":
    # run any doctests embedded in this module when executed as a script
    from doctest import testmod

    testmod()
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL when vision extras are installed; otherwise provide a do-nothing
# stand-in class so the module still imports (the vision tests are skipped).
if is_vision_available():
    from PIL import Image
else:
    class snake_case__:
        '''No-op placeholder used when PIL is unavailable.'''

        @staticmethod
        def lowercase_ ( *__lowercase , **__lowercase ) -> Union[str, Any]:
            # accepts any arguments and does nothing
            pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case__( unittest.TestCase ):
    '''Pipeline tests for zero-shot object detection.

    NOTE(review): automated renaming has mangled this class — every test
    method is named ``lowercase_`` (only the last definition survives at
    class creation) and the first method's signature repeats the same
    parameter name three times (a SyntaxError).  Compare against the original
    transformers test file before trusting behavior; the comments below
    describe the intent of each method as written.
    '''

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    # build a tiny pipeline plus one example input for the common pipeline tests
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
        lowerCAmelCase_ : Union[str, Any] = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCAmelCase_ : str = [
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
            }
        ]
        return object_detector, examples

    # every detection must carry a score, a label and an xmin/ymin/xmax/ymax box
    def lowercase_ ( self , __lowercase , __lowercase ) -> str:
        lowerCAmelCase_ : Tuple = object_detector(examples[0] , threshold=0.0 )
        lowerCAmelCase_ : Dict = len(__lowercase )
        self.assertGreater(__lowercase , 0 )
        self.assertEqual(
            __lowercase , [
                {
                    '''score''': ANY(__lowercase ),
                    '''label''': ANY(__lowercase ),
                    '''box''': {'''xmin''': ANY(__lowercase ), '''ymin''': ANY(__lowercase ), '''xmax''': ANY(__lowercase ), '''ymax''': ANY(__lowercase )},
                }
                for i in range(__lowercase )
            ] , )

    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def lowercase_ ( self ) -> List[str]:
        pass

    # tiny model, single image and batched input: pin exact scores/boxes
    @require_torch
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : Union[str, Any] = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCAmelCase_ : Union[str, Any] = object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                {'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                {'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
                {'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
            ] , )
        lowerCAmelCase_ : Union[str, Any] = object_detector(
            [
                {
                    '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                }
            ] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                [
                    {'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                    {'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
                    {'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                ]
            ] , )

    # full pretrained model, single and two-image batch: pin exact results
    @require_torch
    @slow
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : Any = pipeline('''zero-shot-object-detection''' )
        lowerCAmelCase_ : Dict = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
            ] , )
        lowerCAmelCase_ : Tuple = object_detector(
            [
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
            ] , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                [
                    {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                    {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                    {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                    {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                    {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
                ],
                [
                    {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                    {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                    {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                    {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                    {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
                ],
            ] , )

    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def lowercase_ ( self ) -> List[str]:
        pass

    # a higher threshold must prune the low-confidence detections
    @require_torch
    @slow
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : Any = 0.2
        lowerCAmelCase_ : List[Any] = pipeline('''zero-shot-object-detection''' )
        lowerCAmelCase_ : Optional[Any] = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__lowercase , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
            ] , )

    # top_k must cap the number of returned detections
    @require_torch
    @slow
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : Dict = 2
        lowerCAmelCase_ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
        lowerCAmelCase_ : Optional[Any] = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__lowercase , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
            ] , )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( PipelineTesterMixin, unittest.TestCase ):
    """Fast CPU tests for `DiTPipeline` built from tiny random components.

    NOTE(review): the base was the undefined name ``UpperCAmelCase__``; restored
    to the imported ``PipelineTesterMixin`` (also referenced explicitly below).
    Class attributes / methods previously all shared one obfuscated name and
    shadowed each other; restored to the names the mixin and the methods use.
    """

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """latents""",
        """num_images_per_prompt""",
        """callback""",
        """callback_steps""",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # Attention-slicing is not exercised for DiT — TODO confirm flag name.
    test_attention_slicing = False

    def get_dummy_components(self):
        """Build a tiny transformer/VAE/scheduler so tests run in seconds."""
        torch.manual_seed(0 )
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn='gelu-approximate',
            num_embeds_ada_norm=1000,
            norm_type='ada_norm_zero',
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        return {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on `device`."""
        # `mps` does not support device-bound generators, so seed globally there.
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        return {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }

    def test_inference(self):
        """End-to-end CPU inference matches a stored reference image slice."""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3) )
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3 )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
    """Slow GPU integration tests running the pretrained DiT checkpoints.

    The three methods previously all bore one obfuscated name (so only the
    last survived, and ``tearDown`` was never invoked by unittest); restored
    to distinct, discoverable names.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        """DiT-XL-2-256 output matches stored reference images per label."""
        generator = torch.manual_seed(0 )

        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
        pipe.to('cuda' )

        words = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        class_ids = pipe.get_label_ids(words )

        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type='np' ).images
        for word, image in zip(words, images ):
            expected_image = load_numpy(
                f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1e-2

    def test_dit_512(self):
        """DiT-XL-2-512 with a DPM-Solver scheduler matches stored references."""
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
        # Swap in the faster multistep scheduler before moving to GPU.
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('cuda' )

        words = ['''vase''', '''umbrella''']
        class_ids = pipe.get_label_ids(words )

        generator = torch.manual_seed(0 )
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type='np' ).images
        for word, image in zip(words, images ):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1e-1
| 255 |
# Public re-exports for the data-processor helpers.
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
# NOTE: the duplicate `SquadVaProcessor` entry (listed twice) was removed.
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import numpy

# List of ((feature tuple), label) pairs. Obfuscation had renamed all five
# module globals to `a_` (each reassignment shadowing the previous one) while
# the functions below still reference the original names — restored.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# theta_0..theta_3 of the linear hypothesis h(x) = theta_0 + sum(theta_i * x_i)
parameter_vector = [2, 4, 1, 5]
m = len(train_data)  # number of training examples
LEARNING_RATE = 0.009  # gradient-descent step size
def _error(example_no, data_set="train"):
    """Signed prediction error (hypothesis - actual) for one example.

    Restored name/parameters: the def had duplicate `_a` parameters (a
    SyntaxError) and callers invoke `_error(i)`.
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    """Evaluate h(x) = theta_0 + sum_i theta_{i+1} * x_i for one feature tuple.

    Iterates `len(parameter_vector) - 1` feature weights (theta_1..theta_3);
    theta_0 is the bias added afterwards.
    """
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual labelled output for `example_no` from the train/test data.

    Returns None for an unknown `data_set` name (original behaviour kept).
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value h(x) for `example_no` of the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of cost-derivative terms over the first `end` training examples.

    `index == -1` corresponds to the bias theta_0 (no feature factor);
    otherwise the error is weighted by feature `index` of each example.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Partial derivative of the mean cost w.r.t. parameter `index + 1`."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Batch gradient descent: update `parameter_vector` until convergence.

    Convergence is declared when successive parameter vectors agree within
    the absolute/relative tolerances below (via numpy.allclose).
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 == -1 selects the bias term's derivative.
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    """Print actual vs. predicted outputs for every held-out test example."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
    # Fit the linear hypothesis to train_data, then report test-set predictions.
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map of pretrained ERNIE-M checkpoint name -> URL of its config.json file.
_UpperCAmelCase : Dict ={
    """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
    """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class snake_case__( PretrainedConfig ):
    """Configuration class storing the hyper-parameters of an ERNIE-M model.

    The original `__init__` repeated the obfuscated parameter name
    ``__lowercase`` for every argument (a SyntaxError); parameter names were
    restored from the attribute assignments in the body. The two class
    attributes also shared one name — restored to the `PretrainedConfig`
    hooks they clearly implement (`model_type`, `attribute_map`).
    """

    model_type = """ernie_m"""
    # Translates legacy kwarg names to the canonical attribute names.
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size = 250002,
        hidden_size = 768,
        num_hidden_layers = 12,
        num_attention_heads = 12,
        intermediate_size = 3072,
        hidden_act = "gelu",
        hidden_dropout_prob = 0.1,
        attention_probs_dropout_prob = 0.1,
        max_position_embeddings = 514,
        initializer_range = 0.02,
        pad_token_id = 1,
        layer_norm_eps = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
"""simple docstring"""
from __future__ import annotations
# Restored names: the classes below annotate with `Path` and read the module
# globals `grid` and `delta`, none of which existed under the obfuscated
# `UpperCAmelCase__` aliases.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """Search-tree node holding a grid position, its goal, and path cost.

    Restored from obfuscation: `__init__` had six identically-named
    parameters (a SyntaxError) and the `self.` attribute targets were
    stripped; both classes in this module also shared one name.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # (row, col) ordering used by the search
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        # Ordering used when sorting the open list: lowest heuristic first.
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search over the module-level `grid`.

    Expands the open node with the smallest heuristic (`Node.__lt__`)
    first; `delta` defines the four cardinal moves.
    """

    def __init__(self, start, goal):
        # start/goal arrive as (row, col); Node takes (pos_x, pos_y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> "Path | None":
        """Run the search; return the path to the goal, or [start] on failure."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> "list[Node]":
        """In-bounds, non-obstacle neighbours of `parent` on the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> "Path":
        """Walk parent pointers back to the start and return the path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
return path
if __name__ == "__main__":
    # Demo: search the module grid from top-left to bottom-right, then print
    # the grid with the found path marked as 2. Restored local names — the
    # obfuscated version assigned everything to one variable and referenced
    # `greedy_bf`/`path`, which were never defined; the grid-marking
    # assignment had also been reduced to a useless local binding.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2  # mark the path cells

        for elem in grid:
            print(elem)
| 25 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build dummy image-processor inputs.

    Restored from obfuscation: the class name is what the test class below
    instantiates, and `__init__` repeated one parameter name ten times
    (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # Fall back to the defaults the real processor uses.
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Kwargs dict for constructing a MobileNetVaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class snake_case__( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """Tests for MobileNetVaImageProcessor resize / center-crop behaviour.

    Restored from obfuscation: the base was the undefined name
    ``UpperCAmelCase__`` (the imported ``ImageProcessingSavingTestMixin``),
    all seven methods shared one name (only the last survived discovery),
    and ``setUp`` bound its tester to a local instead of the
    ``self.image_processor_tester`` attribute the other methods read.
    """

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''crop_size''' ) )

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
def decimal_isolate(number, digit_amount):
    """Isolate the decimal part of `number`, keeping its sign.

    When `digit_amount` > 0 the fraction is rounded to that many digits;
    with 0 the raw floating-point fraction is returned.

    Restored name/parameters: the demo below calls `decimal_isolate`, and
    the obfuscated def repeated one parameter name (a SyntaxError).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo: isolate decimal parts at various precisions; sign is preserved.
    print(decimal_isolate(1.5_3, 0))
    print(decimal_isolate(3_5.3_4_5, 1))
    print(decimal_isolate(3_5.3_4_5, 2))
    print(decimal_isolate(3_5.3_4_5, 3))
    print(decimal_isolate(-1_4.7_8_9, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-1_4.1_2_3, 1))
    print(decimal_isolate(-1_4.1_2_3, 2))
    print(decimal_isolate(-1_4.1_2_3, 3))
| 29 |
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates.

    1-indexed: node 1 is the root and covers element range [1, size].
    Restored from obfuscation: every method shared the name `lowercase_`,
    `build`/`update`/`query` repeated a single parameter name (SyntaxErrors),
    and `__init__` dropped its `self.` attribute targets; the names used at
    the call sites (`self.left`, `self.segment_tree`, `SegmentTree`, ...)
    fix the intended identifiers.
    """

    def __init__(self, size) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx) -> int:
        """Index of the left child of node `idx`."""
        return idx * 2

    def right(self, idx) -> int:
        """Index of the right child of node `idx`."""
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a) -> None:
        """Build node `idx` over [left_element, right_element] from list `a`."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val) -> bool:
        """Lazily assign `val` to every element in [a, b]."""
        if self.flag[idx] is True:
            # Push the pending assignment down before touching this node.
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        """Maximum over [a, b]; -inf when the node range is disjoint from it."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    # Demo restored: the constants were obfuscated to `_UpperCAmelCase` while
    # the statements below reference `A`, `size` and `segt`.
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
import qiskit
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Optional[Any] ) -> qiskit.result.counts.Counts:
__snake_case = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
__snake_case = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__snake_case = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
    # Measure a 1-qubit / 1-classical-bit circuit and print the histogram.
    print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
| 24 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# Restored names: `direct_transformers_import(TRANSFORMERS_PATH)` below and
# the index.md lookup rely on these constants, which the obfuscation had
# collapsed into three shadowing `_UpperCAmelCase` assignments.
TRANSFORMERS_PATH = """src/transformers"""
PATH_TO_DOCS = """docs/source/en"""
REPO_PATH = """."""
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt` in `filename`.

    Blank lines just after the start prompt and just before the end prompt
    are excluded. Returns (text, start_index, end_index, lines) so callers
    can splice a replacement back into the file.

    Restored parameters: the def repeated `lowerCAmelCase_` three times
    (a SyntaxError); the call site uses these keyword names.
    """
    with open(filename, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim the surrounding blank lines (a line of length <= 1 is just "\n").
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_UpperCAmelCase : Optional[Any] ="""Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
_UpperCAmelCase : Optional[int] =re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_UpperCAmelCase : Dict =re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_UpperCAmelCase : Optional[Any] =re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[int] =direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase identifier into its word components.

    The regex breaks before each lowercase->uppercase boundary and before an
    uppercase run followed by a lowercase letter (so acronyms stay together).
    Restored name: `get_model_table_from_auto_modules` calls it by this name.
    """
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''', identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a field of `width` characters (padding with spaces).

    The check-mark emoji render two columns wide, so they count as length 2.
    """
    text_length = 2 if text in ("✅", "❌") else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Build the Markdown support table (model x slow/fast tok, PT/TF/Flax).

    Scans `dir(transformers_module)` once, flags which framework classes and
    tokenizers exist per model prefix, then renders a center-aligned table.
    Restored local names — obfuscation had collapsed them into one
    identifier, losing e.g. the `lookup_dict[attr_name] = True` and
    `widths[0] = ...` assignments.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('''Config''', '''''') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('''Tokenizer'''):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('''TokenizerFast'''):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''''''.join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    table = '''|''' + '''|'''.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '''|\n'''
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths]) + "|\n"
    check = {True: '''✅''', False: '''❌'''}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is current; rewrite it if `overwrite`.

    Raises ValueError when the table is stale and `overwrite` is False.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, '''index.md''' ), start_prompt='''<!--This table is updated automatically from the auto modules''', end_prompt='''<!-- End table-->''', )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, '''index.md''' ), '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
    # CLI entry point: pass --fix_and_overwrite to rewrite a stale table.
    # Restored `parser`/`args` names — the obfuscated version assigned both
    # to `_UpperCAmelCase` and then referenced the undefined name `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class __A(TestCase):
    """Tests for RagTokenizer (DPR question-encoder tokenizer + BART generator tokenizer)."""

    def setUp(self):
        """Create a temp dir holding minimal DPR (WordPiece) and BART (BPE) vocab files."""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok: WordPiece vocabulary, one token per line.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok: BPE vocabulary (token -> id) plus merge rules.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        """Saving then reloading a RagTokenizer must preserve both sub-vocabularies."""
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        # `from_pretrained` loads the fast variants by default.
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        """Smoke test: the published rag-token-nq tokenizer encodes a batch of questions."""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        """Smoke test: the published rag-sequence-nq tokenizer encodes a batch of questions."""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the `transformers-cli` console script.

    Builds the argument parser, registers every sub-command, parses the
    command line, and runs the selected command.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No sub-command selected: show usage and exit with an error code.
        parser.print_help()
        exit(1)

    # Run: each sub-command factory returns the service object to execute.
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    # Console-script entry point.
    # NOTE(review): `main` is not bound under that name in this file as written —
    # presumably the CLI dispatcher defined just above; confirm.
    main()
'''simple docstring'''
def binomial_coefficient(n, k):
    """Return C(n, k): the number of ways to choose ``k`` items from ``n``."""
    result = 1  # running product value
    # Since C(n, k) = C(n, n - k), use the smaller k to shorten the loop.
    if k > (n - k):
        k = n - k
    # Multiply then divide incrementally; the intermediate stays an exact integer.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count):
    """Return the ``node_count``-th Catalan number, C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n):
    """Return n! for non-negative ``n``.

    Raises:
        ValueError: If ``n`` is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count):
    """Return the number of distinct labeled binary trees with ``node_count`` nodes.

    This is the Catalan number (tree shapes) times n! (labelings).
    """
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    # Interactive entry point: prompt for a node count (blank input -> 0 -> error).
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
| 56 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # Fall back to None so `slow_tokenizer_class` below stays importable
    # when sentencepiece is not installed.
    NllbTokenizer = None

logger = logging.get_logger(__name__)
# Names of the on-disk tokenizer files.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

# Hosted locations of the tokenizer files per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input sizes (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
# FLORES-200 language codes used by the NLLB checkpoints (read below to build
# `lang_code_to_id` and the additional special tokens).
FAIRSEQ_LANGUAGE_CODES = [
    "ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab",
    "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn",
    "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt",
    "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn",
    "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn",
    "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn",
    "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn",
    "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn",
    "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn",
    "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo",
    "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn",
    "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn",
    "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva",
    "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn",
    "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng",
    "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn",
    "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml",
    "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn",
    "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab",
    "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans",
    "zho_Hant", "zul_Latn",
]
class snake_case__(PreTrainedTokenizerFast):
    """Fast NLLB tokenizer (backed by the Rust `tokenizers` library).

    Adds NLLB-specific source/target language handling: the active language
    code plus EOS are injected as special tokens around every encoded
    sequence. ``legacy_behaviour`` selects the NLLB-paper ordering
    (suffix ``[eos, lang_code]``) instead of the default prefix ``[lang_code]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    # Special tokens placed before/after every encoded sequence; reset whenever
    # the active language changes.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """The currently active source-language code (e.g. ``eng_Latn``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap the token ids with the current prefix/suffix special tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """NLLB does not use token type ids: return zeros of the framed length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        # The generator must start decoding with the target-language token.
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset prefix/suffix to the source-language configuration.

        Legacy (NLLB-paper) order: no prefix, suffix = [eos, src_lang_code];
        default order: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset prefix/suffix to the target-language configuration (mirror of the source setter)."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        """Copy the sentencepiece model into ``save_directory`` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE__(TestCase):
    """Static lint checks over every dataset script under ``./datasets``."""

    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a match if *filepath* contains an `open(...)` without an explicit encoding/binary mode."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Return the first real `print(` call in *filepath* (ignoring comments and docstrings), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 108 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Type markers used in the annotations below (evaluated at definition time,
# so they must be bound under exactly these names).
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    """Parse a truthy/falsy CLI string into a bool (actual bools pass through).

    Raises:
        ArgumentTypeError: If the value is not a recognised boolean spelling.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Return an argparse ``type=`` callable mapping a string back to its choice object.

    Unknown strings are returned unchanged so argparse can report them
    against ``choices`` itself.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def lowerCAmelCase(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    """Convenience wrapper around ``dataclasses.field`` that stores argparse
    metadata (``aliases`` and ``help``) in the field's metadata mapping.

    Args:
        aliases: Extra option string(s) for the generated argparse argument.
        help: Help text for the generated argparse argument.
        default: Field default (mutually exclusive with ``default_factory``).
        default_factory: Zero-argument callable producing the default value.
        metadata: Extra metadata; a fresh dict is created per call when omitted.
        **kwargs: Forwarded to ``dataclasses.field``.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class snake_case__(ArgumentParser):
    """``argparse.ArgumentParser`` subclass that builds its arguments from dataclass fields.

    Type hints on the dataclass fields are used to pick the argparse argument
    type; ``parse_args_into_dataclasses`` is the main entry point.
    """

    # Dataclass types this parser creates instances of (set in __init__).
    dataclass_types: "Iterable[DataClassType]"

    def __init__(self, dataclass_types, **kwargs):
        """
        Args:
            dataclass_types: A dataclass type, or a list of them, to expose as CLI arguments.
            **kwargs: Forwarded to ``argparse.ArgumentParser``.
        """
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser, field):
        """Register a single dataclass ``field`` on ``parser`` as an argparse argument."""
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype):
        """Add one argparse argument per ``init`` field of dataclass type ``dtype``."""
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> "Tuple[DataClass, ...]":
        """Parse command-line args (or ``args``) into instances of the dataclass types.

        Optionally merges in arguments read from ``.args`` files; command-line
        arguments always take precedence over file contents.

        Raises:
            ValueError: If unused arguments remain and ``return_remaining_strings`` is False.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: dict, allow_extra_keys: bool = False) -> "Tuple[DataClass, ...]":
        """Instantiate the dataclass types from a plain dict instead of argv.

        Raises:
            ValueError: If ``args`` has keys no dataclass consumes and ``allow_extra_keys`` is False.
        """
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> "Tuple[DataClass, ...]":
        """Load a JSON file and instantiate the dataclass types from it."""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> "Tuple[DataClass, ...]":
        """Load a YAML file and instantiate the dataclass types from it."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint identifiers to their hosted config files.
# NOTE(review): constant name follows the transformers archive-map convention;
# confirm against whatever imports it.
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for the Audio Spectrogram Transformer (AST) model.

    Stores the hyper-parameters consumed by the AST encoder (ViT-style
    transformer over mel-spectrogram patches). All arguments have the
    defaults of the `MIT/ast-finetuned-audioset-10-10-0.4593` checkpoint.

    Fix for the original: every `__init__` parameter was named
    `__UpperCAmelCase` (duplicate argument names are a SyntaxError) and the
    body read/assigned names that were never bound; the base class
    `UpperCAmelCase__` was undefined — the imported name is `PretrainedConfig`.
    """

    # NOTE(review): upstream calls this attribute `model_type`; the original
    # attribute name is kept to avoid changing the class interface.
    lowerCAmelCase__ = """audio-spectrogram-transformer"""

    def __init__(
        self,
        hidden_size=768,                   # encoder embedding dimension
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,            # feed-forward inner dimension
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        patch_size=16,                     # square patch edge over the spectrogram
        qkv_bias=True,
        frequency_stride=10,               # patch stride along the mel axis
        time_stride=10,                    # patch stride along the time axis
        max_length=1024,                   # max number of time frames
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase(lowerCAmelCase_) -> "snake_case__":
    """Factory used by argparse `set_defaults(func=...)`; ignores its argument.

    Fix for the original: it returned `EnvironmentCommand()`, a name that does
    not exist in this module — the command class below is named `snake_case__`.
    """
    return snake_case__()
class snake_case__(BaseDiffusersCLICommand):
    """`diffusers-cli env`: print environment info for bug reports.

    Fix for the original: all three methods were named `lowercase_` (so only
    the last survived on the class), `run` read names (`pt_version`,
    `hub_version`, ...) that were never bound, and the base class
    `UpperCAmelCase__` was undefined — the imported name is
    `BaseDiffusersCLICommand`.
    """

    @staticmethod
    def register_subcommand(parser):
        """Register the `env` subcommand on the given subparsers object."""
        download_parser = parser.add_parser("""env""")
        # `lowerCAmelCase` is the module-level factory defined above; the
        # original wired `func` to the subparsers object itself.
        download_parser.set_defaults(func=lowerCAmelCase)

    def run(self):
        """Collect version info for diffusers and its optional dependencies."""
        hub_version = huggingface_hub.__version__

        pt_version = """not installed"""
        pt_cuda_available = """NA"""
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = """not installed"""
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = """not installed"""
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = """not installed"""
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            """`diffusers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """PyTorch version (GPU?)""": f"""{pt_version} ({pt_cuda_available})""",
            """Huggingface_hub version""": hub_version,
            """Transformers version""": transformers_version,
            """Accelerate version""": accelerate_version,
            """xFormers version""": xformers_version,
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }

        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d) -> str:
        """Render a dict as a markdown bullet list, one `- key: value` per line."""
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Mapping from original SAM checkpoint key fragments to HF SamModel key
# fragments. Bound both to the canonical name the functions below actually
# read (`KEYS_TO_MODIFY_MAPPING`) and to the original module name, so existing
# references keep working.
KEYS_TO_MODIFY_MAPPING = lowerCAmelCase_ = {
    """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
    """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
    """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
    """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
    """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
    """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
    """mask_downscaling.0""": """mask_embed.conv1""",
    """mask_downscaling.1""": """mask_embed.layer_norm1""",
    """mask_downscaling.3""": """mask_embed.conv2""",
    """mask_downscaling.4""": """mask_embed.layer_norm2""",
    """mask_downscaling.6""": """mask_embed.conv3""",
    """point_embeddings""": """point_embed""",
    """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
    """image_encoder""": """vision_encoder""",
    """neck.0""": """neck.conv1""",
    """neck.1""": """neck.layer_norm1""",
    """neck.2""": """neck.conv2""",
    """neck.3""": """neck.layer_norm2""",
    """patch_embed.proj""": """patch_embed.projection""",
    """.norm""": """.layer_norm""",
    """blocks""": """layers""",
}
def __UpperCAmelCase(state_dict):
    """Rename original SAM checkpoint keys to the HF SamModel naming scheme.

    Drops the normalization buffers, applies KEYS_TO_MODIFY_MAPPING fragment
    renames, remaps the 3-layer hypernetwork MLPs to proj_in/layers.0/proj_out,
    and duplicates the shared positional embedding under its new key.

    Fix for the original: the parameter and every local were bound to mangled
    names (`__lowerCamelCase`, `lowercase__`) while being read as
    `state_dict`, `model_state_dict`, etc.
    """
    model_state_dict = {}
    # These buffers are handled by the HF image processor, not the model.
    state_dict.pop("""pixel_mean""", None)
    state_dict.pop("""pixel_std""", None)

    output_hypernetworks_mlps_pattern = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("""layers.0""", """proj_in""")
            elif layer_nb == 1:
                key = key.replace("""layers.1""", """layers.0""")
            elif layer_nb == 2:
                key = key.replace("""layers.2""", """proj_out""")

        model_state_dict[key] = value

    model_state_dict["""shared_image_embedding.positional_embedding"""] = model_state_dict[
        """prompt_encoder.shared_embedding.positional_embedding"""
    ]
    return model_state_dict


# Alias matching the name the conversion entry point below calls; the second
# `def __UpperCAmelCase` rebinds the mangled name, so without this alias the
# original call recursed into the wrong function.
replace_keys = __UpperCAmelCase
def __UpperCAmelCase(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Convert an original SAM checkpoint into a HF `SamModel` and sanity-check it.

    Downloads the checkpoint from the hub, remaps its keys via `replace_keys`,
    loads it into a `SamModel`, and (for the vit-h checkpoint) asserts the IoU
    scores on a reference image for point, box, and two-point prompts.

    NOTE(review): `pytorch_dump_folder` and `push_to_hub` are accepted for CLI
    compatibility but were never used by the original body — saving/pushing
    appears to be missing; confirm against the upstream conversion script.

    Fix for the original: all four parameters were mangled and every call site
    passed the unbound name `lowerCAmelCase_`.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"""checkpoints/{model_name}.pth""")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=10_24,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=12_80,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="""cpu""")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("""cuda""")

    img_url = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("""RGB""")
    input_points = [[[4_00, 6_50]]]
    input_labels = [[1]]

    # Prompt-free forward pass.
    inputs = processor(images=np.array(raw_image), return_tensors="""pt""").to("""cuda""")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8

        # Single point prompt.
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="""pt"""
        ).to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4

        # Box prompt.
        input_boxes = ((75, 2_75, 17_25, 8_50),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="""pt""").to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4

        # Test with 2 points and 1 image.
        input_points = [[[4_00, 6_50], [8_00, 6_50]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="""pt"""
        ).to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
    # Fix for the original: `parser`, `choices`, and `args` were all assigned
    # to the mangled name `lowerCAmelCase_` while being read under their real
    # names, and the final call targeted an undefined `convert_sam_checkpoint`.
    parser = argparse.ArgumentParser()
    choices = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
    parser.add_argument(
        '--model_name',
        default='sam_vit_h_4b8939',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    parser.add_argument(
        '--model_hub_id',
        default='ybelkada/segment-anything',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    args = parser.parse_args()
    # `__UpperCAmelCase` is the conversion entry point defined directly above.
    __UpperCAmelCase(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 16 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case__(unittest.TestCase):
    """Integration tests for the two Jukebox lyric tokenizer checkpoints.

    Fix for the original: the two class attributes shared one name
    (`SCREAMING_SNAKE_CASE__`), both test methods shared one name
    (`lowercase_`, so only the second survived and neither was discovered as a
    test), and the assertions read `tokens`/`EXPECTED_OUTPUT` which were never
    bound (assignments went to `lowerCAmelCase_`).
    """

    # Tokenizer class under test.
    tokenizer_class = JukeboxTokenizer
    # Metadata fed to the tokenizer in both tests (read via `self.metas`).
    metas = {
        """artist""": """Zac Brown Band""",
        """genres""": """Country""",
        """lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self):
        """Token ids for the 1b-lyrics checkpoint match the reference output."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''')
        tokens = tokenizer(**self.metas)['''input_ids''']
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
                7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
                4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
                4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
                3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
                4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
                4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
                7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
                7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
                6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
                3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
                2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
                3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
                2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
                4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
                4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
                7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
                3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
                4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
                4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
                3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
                4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
                3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
                3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
                4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
                3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
                7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
                4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
                4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
                4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
                4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
                4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
                4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
                2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
                7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
                4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
                7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
                3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
                4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
                4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
                4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
                3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
                4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
                2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
                7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
                7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
                7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                7_6, 7_6]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        """Token ids for the 5b-lyrics checkpoint match the reference output."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''')
        tokens = tokenizer(**self.metas)['''input_ids''']
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
                3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
                3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
                4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
                7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
                7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
                2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
                3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
                3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
                7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
                7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
                4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
                7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
                7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
                7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
                6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
                4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
                4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
                3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
                3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
                4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
                4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
                4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
                3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
                3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
                4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
                3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
                3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
                4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
                2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
                3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
                3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
                3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
                3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
                4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
                3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
                1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
                1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
                4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
                4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
                4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
                4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
                2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
                3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
                7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
                3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
                4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
                7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
                4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
                7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
                2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
                7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 7_7, 7_7]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __UpperCamelCase(PipelineTool):
    """Tool that generates an English caption for an image using BLIP.

    Fix for the original: all class attributes shared the name `lowerCamelCase`
    (only the last survived), all three methods were named `__a`, `__init__`
    declared `*lowerCAmelCase__, **lowerCAmelCase__` (duplicate argument names
    are a SyntaxError), method bodies read the unbound name `__lowercase`, and
    the base class `UpperCAmelCase__` was undefined — the imported name is
    `PipelineTool`.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Captioning requires the vision extras (PIL etc.).
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image):
        """Preprocess a PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run generation on the preprocessed inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 105 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    """Fetch a single Hacker News story as a dict from the Firebase API.

    Fix for the original: the parameter was named `lowerCAmelCase_` while the
    f-string read the unbound name `story_id`, and the function name shadowed
    its siblings (all three were `lowerCAmelCase`); the comprehension below
    already calls `get_hackernews_story`.
    """
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the current top `max_stories` Hacker News stories.

    Fix for the original: the parameter was mangled to `lowerCAmelCase_` while
    the slice read the unbound name `max_stories`, and the function name
    shadowed its siblings.
    """
    top_stories_url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(top_stories_url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top Hacker News stories as a markdown bullet list.

    Fix for the original: the call target `hackernews_top_stories` and the
    loop variable `stories` were unbound because every definition/assignment
    was mangled.
    """
    stories = hackernews_top_stories(max_stories)
    return "\n".join('''* [{title}]({url})'''.format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the example (upstream names: MAX_GPU_BATCH_SIZE and
# EVAL_BATCH_SIZE). NOTE(review): both assignments bind the SAME name, so the
# first value (16) is immediately overwritten by 32 — presumably unintended;
# verify against the upstream accelerate local_sgd example.
_UpperCamelCase: int = 1_6
_UpperCamelCase: Optional[Any] = 3_2
def get_dataloaders(accelerator, batch_size=16):
    """Build GLUE/MRPC train and eval dataloaders for a BERT tokenizer.

    Args:
        accelerator: `Accelerator` used for process coordination and to pick
            padding behavior per distributed/mixed-precision setup.
        batch_size: Per-device batch size for both dataloaders.

    Returns:
        (train_dataloader, eval_dataloader)

    Fix for the original: the two parameters were both named `_UpperCAmelCase`
    (SyntaxError) and every local was bound to `lowercase` while being read as
    `tokenizer`/`datasets`/`tokenized_datasets`; the call site below already
    uses the name `get_dataloaders`.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader
# For testing only: replace the real dataloaders with mocked ones so the
# example can run without downloading GLUE/MRPC. Fix for the original: the
# mock was assigned to `_UpperCamelCase` instead of redefining
# `get_dataloaders` (which is what the `# noqa: F811` redefinition-suppression
# comment was for, and what the training loop actually calls).
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train BERT on GLUE/MRPC with Accelerate + LocalSGD synchronization.

    Args:
        config: Dict with 'lr', 'num_epochs', 'seed', 'batch_size'.
        args: Parsed CLI namespace (mixed_precision, gradient_accumulation_steps,
            local_sgd_steps, cpu).

    Fix for the original: the parameters were both named `_UpperCAmelCase`
    (SyntaxError) and every local was bound to `lowercase` while being read
    under its real name (`accelerator`, `model`, `optimizer`, ...).
    """
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=1_00, num_training_steps=(len(train_dataloader) * num_epochs))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
def main():
    """Parse CLI arguments and launch the LocalSGD training example.

    Fix for the original: `parser`, `args`, and `config` were assigned to the
    mangled name `lowercase` while being read under their real names, and the
    `__main__` guard below already calls `main()`.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.')
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.')
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 255 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece test fixture (upstream name: SAMPLE_VOCAB).
# NOTE(review): the two integer assignments further below rebind the SAME
# name `_UpperCAmelCase`, clobbering first this path and then 25_0004 —
# upstream these are three distinct constants (SAMPLE_VOCAB, EN_CODE = 250004,
# RO_CODE = 250020); confirm before relying on any of them.
_UpperCAmelCase : List[str] =get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
# mBART language-id token ids (upstream names: EN_CODE and RO_CODE).
_UpperCAmelCase : Optional[int] =25_0004
_UpperCAmelCase : Tuple =25_0020
@require_sentencepiece
@require_tokenizers
class snake_case__( TokenizerTesterMixin, unittest.TestCase ):
    """Unit tests for the slow and fast MBart tokenizers, run against the small
    SentencePiece fixture model.

    NOTE(review): attribute and method names are restored from the mixin
    references in the bodies (``self.tokenizer_class`` etc.); the scrambled file
    gave every attribute/method one shared name, so later definitions shadowed
    earlier ones and ``unittest`` could never discover the tests.
    """

    tokenizer_class = MBartTokenizer          # slow tokenizer under test
    rust_tokenizer_class = MBartTokenizerFast  # fast tokenizer under test
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        """Build a slow tokenizer from the fixture and save it for the mixin."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(_UpperCAmelCase, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self) -> None:
        """Tokenize, id-convert and round-trip two reference sentences."""
        tokenizer = MBartTokenizer(_UpperCAmelCase, keep_accents=True)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]],
        )

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
                #                                       ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ],
        )

    def test_save_pretrained(self) -> None:
        """Slow and fast tokenizers must save the same files and reload the
        same special tokens for every supported ``legacy_format`` value."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Save with the default legacy_format=None
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__( unittest.TestCase ):
    """Integration tests for the ``facebook/mbart-large-en-ro`` checkpoint.

    NOTE(review): attribute and method names are restored from the bodies that
    reference them (``cls.checkpoint_name``, ``self.src_text``, ...); in the
    scrambled file every attribute/method shared one identifier, so later
    definitions silently shadowed earlier ones and ``setUpClass`` never ran.
    """

    checkpoint_name = """facebook/mbart-large-en-ro"""
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        """Download the en->ro tokenizer once for the whole test class."""
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 2_5_0_0_0_1)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 2_5_0_0_0_4)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 2_5_0_0_2_0)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ['''this is gunna be a long sentence ''' * 2_0]
        assert isinstance(src_text[0], str)
        desired_max_length = 1_0
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)            # EOS
        self.assertEqual(ids[-1], EN_CODE)      # source language code comes last
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']), [2_5_0_0_2_6, 2_5_0_0_0_1])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='''pt''')
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='''pt''', )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 1_4), batch.input_ids.shape)
        self.assertEqual((2, 1_4), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seqaseq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='''pt''')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=1_0, return_tensors='''pt''')
        labels = targets['''input_ids''']
        batch['''decoder_input_ids'''] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 1_0)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''')
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                '''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 2_5_0_0_0_1,
            },
        )
def lowerCamelCase__ ( _a):
    """Return the number of distinct ways to climb ``_a`` stairs when one may
    take either 1 or 2 steps at a time (the Fibonacci recurrence).

    Raises AssertionError if ``_a`` is not a positive integer.
    """
    assert (
        isinstance(_a, int) and _a > 0
    ), f"number_of_steps needs to be positive integer, your input {_a}"
    if _a == 1:
        return 1
    # previous counts ways for (k - 1) steps, current for k steps.
    current, previous = 1, 1
    for _ in range(_a - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase ( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None )-> None:
    """Convert every tensor of a saved state dict to fp16, in place on disk.

    Args:
        src_path: path of a file produced by ``torch.save`` holding a flat
            ``{name: Tensor}`` state dict (e.g. ``pytorch_model.bin``).
        map_location: device passed to ``torch.load``.
        save_path: destination path; when ``None`` the source is overwritten.

    Raises:
        TypeError: if any value in the loaded dict is not a ``torch.Tensor``.
    """
    # tqdm is only a progress bar; fall back to a no-op wrapper when absent.
    try:
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable, *args, **kwargs):
            return iterable

    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''')
        # fixed: previously the halved tensor was assigned to a dead local and
        # the isinstance check inspected the path instead of the tensor.
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
    # fixed: the CLI previously referenced the undefined name ``convert``.
    fire.Fire(lowerCAmelCase)
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCAmelCase__ : Tuple = TypeVar('T')
class lowerCAmelCase_ (Generic[T] ):
"""simple docstring"""
__UpperCamelCase : deque[T] # Cache store of keys
__UpperCamelCase : set[T] # References of the keys in cache
__UpperCamelCase : int = 10 # Maximum capacity of cache
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = deque()
SCREAMING_SNAKE_CASE__ : List[str] = set()
if not n:
SCREAMING_SNAKE_CASE__ : List[Any] = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
SCREAMING_SNAKE_CASE__ : Dict = n
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ : List[Any] = self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def __magic_name__ (self ) -> None:
"""simple docstring"""
for k in self.dq_store:
print(__lowercase )
def __repr__(self ) -> str:
"""simple docstring"""
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# UnCLIP requires transformers >= 4.25.0 together with torch; fall back to the
# dummy objects (which raise an informative error on use) when unavailable.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the jukebox subpackage.
# fixed: the dict was assigned to ``__UpperCAmelCase`` and then clobbered by the
# model list, while ``_LazyModule`` below referenced the undefined name
# ``_import_structure``; the lazy module was also never installed in sys.modules.
_import_structure = {
    """configuration_jukebox""": [
        """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """JukeboxConfig""",
        """JukeboxPriorConfig""",
        """JukeboxVQVAEConfig""",
    ],
    """tokenization_jukebox""": ["""JukeboxTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_jukebox"""] = [
        """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """JukeboxModel""",
        """JukeboxPreTrainedModel""",
        """JukeboxVQVAE""",
        """JukeboxPrior""",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : str ={
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case__( PretrainedConfig ):
    r"""Configuration holding the encoder and decoder hyper-parameters of a
    ViT MAE model.

    NOTE(review): the base class was written as the undefined name
    ``UpperCAmelCase__``; ``PretrainedConfig`` (imported above) is the only
    sensible base for a ``*Config`` — confirm.  The ``__init__`` parameter
    names are restored from the attribute assignments in the body; the
    scrambled signature gave every parameter the same name (a SyntaxError).
    """

    model_type = """vit_mae"""

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=2_2_4,
        patch_size=1_6,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=1_6,
        decoder_hidden_size=5_1_2,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2_0_4_8,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Decoder hyper-parameters and MAE-specific options.
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class SCREAMING_SNAKE_CASE__ ( ModelOutput ):
    """Output of the Roberta-series text model defined below.

    NOTE(review): the field names are restored from the keyword arguments used
    to construct this output in the model's forward pass; the scrambled file
    named all four fields ``A_``, so only the last survived.  The base class
    was the undefined name ``UpperCAmelCase__``; ``ModelOutput`` (imported
    above) matches a ``*ModelOutput`` dataclass — confirm.
    """

    projection_state: Optional[torch.FloatTensor] = None   # projected text embedding
    last_hidden_state: torch.FloatTensor = None            # encoder final hidden states
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class SCREAMING_SNAKE_CASE__ ( XLMRobertaConfig ):
    """Configuration for the Roberta-series model below.

    NOTE(review): parameter names are restored from the right-hand sides of the
    body (``project_dim`` etc.); the scrambled signature named every parameter
    ``a__`` (a SyntaxError).  The base was the undefined ``UpperCAmelCase__``;
    ``XLMRobertaConfig`` (imported above) matches the model's backbone — confirm.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim          # output size of the projection head
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class SCREAMING_SNAKE_CASE__ ( RobertaPreTrainedModel ):
    """XLM-Roberta encoder with a linear projection head on top.

    NOTE(review): the class attributes below were all named ``A_`` in the
    scrambled file; their values identify them as the standard
    ``RobertaPreTrainedModel`` hooks.  All classes in this file share the name
    ``SCREAMING_SNAKE_CASE__``, so ``config_class`` resolves to the config
    class defined immediately above, and ``TransformationModelOutput`` (the
    dataclass at the top of the file, also renamed) is unresolvable — confirm
    the intended class names.
    """

    _keys_to_ignore_on_load_unexpected = [r"""pooler""", r"""logit_scale"""]
    _keys_to_ignore_on_load_missing = [r"""position_ids""", r"""predictions.decoder.bias"""]
    base_model_prefix = """roberta"""
    config_class = SCREAMING_SNAKE_CASE__  # the config class defined just above

    def __init__(self, config):
        super().__init__(config)
        # fixed: the submodules were bound to locals and immediately lost.
        self.roberta = XLMRobertaModel(config)  # exposed as ``self.base_model`` via base_model_prefix
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Encode the input and return the projected representation.

        When ``has_pre_transformation`` is set, the projection is computed from
        the layer-normed second-to-last hidden state instead of the last one.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output2 = outputs['''hidden_states'''][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )

    # Backward-compatible alias for the scrambled method name.
    a = forward
def lowerCAmelCase ( lowerCAmelCase_ = 10**9 )-> int:
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Optional[int] = 2
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowerCAmelCase_ : Any = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
    # fixed: previously called the undefined name ``solution``.
    print(f"""{lowerCAmelCase() = }""")
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Any =logging.get_logger(__name__)
class __A ( SequenceFeatureExtractor ):
    r"""CLAP feature extractor: converts raw mono audio into (optionally fused)
    log-mel spectrogram features.

    NOTE(review): the base class was the undefined name ``UpperCAmelCase__``;
    ``SequenceFeatureExtractor`` (imported above) is the conventional base for
    a ``*FeatureExtractor`` — confirm.  Parameter and method names are restored
    from the bodies that reference them; the scrambled file gave every
    parameter of each method the same name (a SyntaxError) and named four
    different methods ``_lowercase``.
    """

    model_input_names = ["""input_features""", """is_longer"""]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # "htk"-scaled filters serve the fused features, "slaney" the plain path.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self):
        """Serialize the extractor, dropping the large (recomputable) mel
        filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        """Compute a (time, mel) dB log-mel spectrogram of one waveform."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a bilinear shrink of the whole mel with three random chunks
        (front / middle / back thirds)."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        """Truncate or pad one waveform, then turn it into mel features.

        Returns ``(input_mel, longer)`` where ``longer`` records whether the
        audio exceeded ``max_length`` (and fusion was applied).
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"""data_truncating {truncation} not implemented""")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into a
        ``BatchFeature`` with ``input_features`` and ``is_longer``."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            # NOTE(review): the scrambled source hid the dtypes; float64 in and
            # float32 here follows the reference CLAP extractor — confirm.
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
import inspect
import unittest
class snake_case__( unittest.TestCase ):
    """Sanity checks for the diffusers dependency-versions table.

    NOTE(review): both tests were named ``lowercase_`` in the scrambled file,
    so the second shadowed the first and unittest discovered neither; the
    ``test_*`` names are restored so both actually run.
    """

    def test_diffusers_import(self) -> None:
        """diffusers itself must be importable."""
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self) -> None:
        """Every backend required by a dummy object must appear in the deps table."""
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # fixed: the normalized name was assigned to a dead local,
                    # so the assertion below checked the un-normalized name.
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f"""{backend} is not in the deps table!"""
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __magic_name__ ( __UpperCAmelCase ) -> str:
'''simple docstring'''
snake_case_ = []
for line in lines:
snake_case_ = re.sub(r'''#.*''', '''''', lowerCAmelCase_ ) # remove comments
if line:
filtered_lines.append(lowerCAmelCase_ )
snake_case_ = '''\n'''.join(lowerCAmelCase_ )
# Make a hash from all this code
snake_case_ = full_str.encode('''utf-8''' )
return shaaaa(lowerCAmelCase_ ).hexdigest()
# get importable module names and hash for caching
# fixed: the table builders called the undefined name ``_hash_python_lines``
# (the hashing function above is named ``__magic_name__``), and the two tables
# mutated below were assigned under the throwaway name ``a``.
a : Optional[Any] = {
    """csv""": (csv.__name__, __magic_name__(inspect.getsource(csv).splitlines())),
    """json""": (json.__name__, __magic_name__(inspect.getsource(json).splitlines())),
    """pandas""": (pandas.__name__, __magic_name__(inspect.getsource(pandas).splitlines())),
    """parquet""": (parquet.__name__, __magic_name__(inspect.getsource(parquet).splitlines())),
    """arrow""": (arrow.__name__, __magic_name__(inspect.getsource(arrow).splitlines())),
    """text""": (text.__name__, __magic_name__(inspect.getsource(text).splitlines())),
    """imagefolder""": (imagefolder.__name__, __magic_name__(inspect.getsource(imagefolder).splitlines())),
    """audiofolder""": (audiofolder.__name__, __magic_name__(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    """.csv""": ("""csv""", {}),
    """.tsv""": ("""csv""", {"""sep""": """\t"""}),
    """.json""": ("""json""", {}),
    """.jsonl""": ("""json""", {}),
    """.parquet""": ("""parquet""", {}),
    """.arrow""": ("""arrow""", {}),
    """.txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
a : List[str] = {"""imagefolder""", """audiofolder"""}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
# Module-level logger used for the deprecation warning below.
_UpperCAmelCase =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Deprecated alias kept for backward compatibility.

    Emits a ``FutureWarning`` on instantiation pointing users at
    ``ChineseCLIPImageProcessor`` and otherwise defers entirely to the parent
    image processor.
    """

    def __init__( self , *args , **kwargs ) -> None:
        # Warn once per instantiation; behavior is otherwise unchanged.
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a__ ( SCREAMING_SNAKE_CASE ):
    '''Factory used by the ``env`` argparse subcommand.

    ``SCREAMING_SNAKE_CASE`` is the parsed-args namespace supplied by argparse;
    it is accepted but not needed to build the command object.
    '''
    return SCREAMING_SNAKE_CASE__()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """CLI command that collects and prints environment info for bug reports."""

    @staticmethod
    def lowercase__ ( snake_case__ ):
        """Register the ``env`` subcommand on *snake_case__* (an argparse subparsers object)."""
        download_parser = snake_case__.add_parser("env" )
        # ``a__`` is the module-level factory that constructs this command.
        download_parser.set_defaults(func=a__ )

    def lowercase__ ( self ):
        """Gather version info of the relevant libraries, print it, and return it."""
        hub_version = huggingface_hub.__version__

        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )

        return info

    @staticmethod
    def format_dict( d ):
        """Render a mapping as a markdown-style bullet list (name restored: it is
        the name ``run`` above actually calls via ``self.format_dict``)."""
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 108 |
def lowerCAmelCase ( lowerCAmelCase_ )-> set:
    """Approximate a minimum vertex cover of the adjacency dict *lowerCAmelCase_*.

    Maximal-matching heuristic: while edges remain, take an arbitrary edge
    (from_node, to_node), add both endpoints to the cover, and drop every edge
    adjacent to either endpoint.
    """
    chosen_vertices = set()
    # Build the edge set directly from the adjacency mapping (self-contained;
    # the original relied on a helper that was not defined under that name).
    edges = {(from_node, to_node) for from_node, to_nodes in lowerCAmelCase_.items() for to_node in to_nodes}
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        # Remove all remaining edges touching either chosen endpoint.
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def lowerCAmelCase ( lowerCAmelCase_ )-> set:
    """Return the set of directed edges ``(from_node, to_node)`` of the
    adjacency dict *lowerCAmelCase_* (the original body read an undefined name
    ``graph`` instead of its parameter)."""
    edges = set()
    for from_node, to_nodes in lowerCAmelCase_.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
    # Example usage (kept for reference):
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
a_ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase__ ):
    """Deprecated alias for ``PoolFormerImageProcessor`` kept for backward compatibility."""

    def __init__( self , *args , **kwargs ):
        '''Warn that this class is deprecated, then defer to the parent image processor.'''
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 330 |
from math import sqrt
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True when the non-negative int *lowerCAmelCase_* is prime (trial division)."""
    assert isinstance(lowerCAmelCase_ , int ) and (
        lowerCAmelCase_ >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if lowerCAmelCase_ <= 1:
        status = False

    for divisor in range(2 , int(round(sqrt(lowerCAmelCase_ ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if lowerCAmelCase_ % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"

    return status
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Sieve of Eratosthenes: return all primes from 2 up to and including
    *lowerCAmelCase_* (must be > 2)."""
    assert isinstance(lowerCAmelCase_ , int ) and (lowerCAmelCase_ > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , lowerCAmelCase_ + 1 ) )

    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"

    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Return every prime in ``[2, lowerCAmelCase_]`` via per-number trial division."""
    assert isinstance(lowerCAmelCase_ , int ) and (lowerCAmelCase_ > 2), "'N' must been an int and > 2"

    def _is_prime(candidate ):
        # trial division up to round(sqrt(candidate)); self-contained because the
        # sibling primality helper is not defined under a usable name.
        return candidate >= 2 and all(
            candidate % divisor != 0 for divisor in range(2 , int(round(sqrt(candidate ) ) ) + 1 )
        )

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    ans = [number for number in range(2 , lowerCAmelCase_ + 1 ) if _is_prime(number )]

    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"

    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Prime factorization of *lowerCAmelCase_* (ascending, with multiplicity).

    Convention preserved from the original module: 0 -> [0] and 1 -> [1].
    Uses ``//=`` so the factors stay ints (the original ``/=`` produced floats).
    """
    assert isinstance(lowerCAmelCase_ , int ) and lowerCAmelCase_ >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    if lowerCAmelCase_ == 0 or lowerCAmelCase_ == 1:
        ans.append(lowerCAmelCase_ )
    else:
        # trial division: the smallest divisor found at each step is necessarily
        # prime, so no explicit primality check is needed.
        quotient = lowerCAmelCase_
        factor = 2
        while quotient != 1:
            if quotient % factor == 0:
                ans.append(factor )
                quotient //= factor
            else:
                factor += 1

    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"

    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Largest prime factor of *lowerCAmelCase_* (conventions: 0 -> 0, 1 -> 1)."""
    assert isinstance(lowerCAmelCase_ , int ) and (
        lowerCAmelCase_ >= 0
    ), "'number' bust been an int and >= 0"

    if lowerCAmelCase_ == 0 or lowerCAmelCase_ == 1:
        ans = lowerCAmelCase_
    else:
        # trial division yields factors in ascending order, so the last
        # recorded factor is the maximum.
        ans = 0
        quotient = lowerCAmelCase_
        factor = 2
        while quotient != 1:
            if quotient % factor == 0:
                ans = factor
                quotient //= factor
            else:
                factor += 1

    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"

    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Smallest prime factor of *lowerCAmelCase_* (conventions: 0 -> 0, 1 -> 1)."""
    assert isinstance(lowerCAmelCase_ , int ) and (
        lowerCAmelCase_ >= 0
    ), "'number' bust been an int and >= 0"

    if lowerCAmelCase_ == 0 or lowerCAmelCase_ == 1:
        ans = lowerCAmelCase_
    else:
        # the first divisor found by trial division is the smallest prime factor.
        factor = 2
        while lowerCAmelCase_ % factor != 0:
            factor += 1
        ans = factor

    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"

    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """True when the int *lowerCAmelCase_* is even."""
    assert isinstance(lowerCAmelCase_ , int ), "'number' must been an int"
    assert isinstance(lowerCAmelCase_ % 2 == 0 , bool ), "compare bust been from type bool"

    return lowerCAmelCase_ % 2 == 0
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """True when the int *lowerCAmelCase_* is odd."""
    assert isinstance(lowerCAmelCase_ , int ), "'number' must been an int"
    assert isinstance(lowerCAmelCase_ % 2 != 0 , bool ), "compare bust been from type bool"

    return lowerCAmelCase_ % 2 != 0
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Goldbach: return two primes at distinct indices whose sum is the even
    number *lowerCAmelCase_* (> 2).

    Note: the search uses ``j = i + 1``, so sums of a prime with itself
    (e.g. 6 = 3 + 3) are not found — this mirrors the original algorithm.
    """
    assert (
        isinstance(lowerCAmelCase_ , int ) and (lowerCAmelCase_ > 2) and (lowerCAmelCase_ % 2 == 0)
    ), "'number' must been an int, even and > 2"

    def _is_prime(candidate ):
        # trial division up to round(sqrt(candidate))
        return candidate >= 2 and all(
            candidate % divisor != 0 for divisor in range(2 , int(round(sqrt(candidate ) ) ) + 1 )
        )

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = [p for p in range(2 , lowerCAmelCase_ + 1 ) if _is_prime(p )]
    len_pn = len(prime_numbers )

    # run variable for while-loops.
    i = 0
    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == lowerCAmelCase_:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == lowerCAmelCase_)
        and _is_prime(ans[0] )
        and _is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def lowerCAmelCase ( numbera , numberb )-> int:
    """Greatest common divisor of two non-negative ints (iterative Euclid).

    The original signature repeated one parameter name (a SyntaxError); the two
    arguments are restored as distinct names.
    """
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."

    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest

    # precondition
    assert isinstance(numbera , int ) and (
        numbera >= 0
    ), "'number' must been from type int and positive"

    return numbera
def lowerCAmelCase ( numbera , numberb )-> int:
    """Least common multiple of two positive ints.

    Implemented as ``a * b // gcd(a, b)``, which is mathematically equivalent
    to the original prime-factorization construction for inputs >= 1.  The
    original signature repeated one parameter name (a SyntaxError); the two
    arguments are restored as distinct names.
    """
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."

    # iterative Euclid for the gcd; both inputs are >= 1 so the gcd is > 0.
    g_a = numbera
    g_b = numberb
    while g_b != 0:
        g_a , g_b = g_b , g_a % g_b

    ans = numbera * numberb // g_a

    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return the n-th prime, zero-indexed: n=0 -> 2, n=1 -> 3, n=2 -> 5, ..."""
    assert isinstance(lowerCAmelCase_ , int ) and (lowerCAmelCase_ >= 0), "'number' must been a positive int"

    def _is_prime(candidate ):
        # trial division up to round(sqrt(candidate))
        return candidate >= 2 and all(
            candidate % divisor != 0 for divisor in range(2 , int(round(sqrt(candidate ) ) ) + 1 )
        )

    index = 0
    ans = 2  # this variable holds the answer

    while index < lowerCAmelCase_:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not _is_prime(ans ):
            ans += 1

    # precondition
    assert isinstance(ans , int ) and _is_prime(
        ans ), "'ans' must been a prime number and from type int"

    return ans
def lowerCAmelCase ( p_number_a , p_number_b )-> list:
    """All primes strictly between the primes *p_number_a* and *p_number_b*.

    The original signature repeated one parameter name (a SyntaxError); the two
    bounds are restored as distinct names.
    """
    def _is_prime(candidate ):
        # trial division up to round(sqrt(candidate))
        return candidate >= 2 and all(
            candidate % divisor != 0 for divisor in range(2 , int(round(sqrt(candidate ) ) ) + 1 )
        )

    assert (
        _is_prime(p_number_a ) and _is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not _is_prime(number ):
        number += 1

    while number < p_number_b:
        ans.append(number )

        number += 1

        # fetch the next prime number.
        while not _is_prime(number ):
            number += 1

    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_a
        and ans[len(ans ) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """All positive divisors of *lowerCAmelCase_* (>= 1), ascending."""
    assert isinstance(lowerCAmelCase_ , int ) and (lowerCAmelCase_ >= 1), "'n' must been int and >= 1"

    ans = [divisor for divisor in range(1 , lowerCAmelCase_ + 1 ) if lowerCAmelCase_ % divisor == 0]

    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == lowerCAmelCase_, "Error in function getDivisiors(...)"

    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """True when *lowerCAmelCase_* (> 1) equals the sum of its proper divisors."""
    assert isinstance(lowerCAmelCase_ , int ) and (
        lowerCAmelCase_ > 1
    ), "'number' must been an int and >= 1"

    # divisor list computed inline (the sibling helper is not defined under a
    # usable name in this module).
    divisors = [d for d in range(1 , lowerCAmelCase_ + 1 ) if lowerCAmelCase_ % d == 0]

    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == lowerCAmelCase_)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == lowerCAmelCase_
def lowerCAmelCase ( numerator , denominator )-> tuple:
    """Reduce the fraction *numerator*/*denominator* by their gcd.

    The original signature repeated one parameter name (a SyntaxError); the
    numerator and denominator are restored as distinct names.
    """
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator (Euclid,
    # inlined because the sibling gcd helper is not defined under that name).
    g_a = abs(numerator )
    g_b = abs(denominator )
    while g_b != 0:
        g_a , g_b = g_b , g_a % g_b
    gcd_of_fraction = g_a

    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Iterative factorial of *lowerCAmelCase_* (>= 0)."""
    assert isinstance(lowerCAmelCase_ , int ) and (lowerCAmelCase_ >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1 , lowerCAmelCase_ + 1 ):
        ans *= factor

    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Iterative Fibonacci with this module's convention fib(0) == fib(1) == fib(2) == 1."""
    assert isinstance(lowerCAmelCase_ , int ) and (lowerCAmelCase_ >= 0), "'n' must been an int and >= 0"

    fib_prev = 0
    ans = 1  # this will be return

    # same recurrence as the original (tmp = ans; ans += prev; prev = tmp),
    # expressed as a tuple assignment.
    for _ in range(lowerCAmelCase_ - 1 ):
        fib_prev , ans = ans , ans + fib_prev

    return ans
"""simple docstring"""
lowerCAmelCase_ = """Tobias Carryer"""
from time import time
class __A :
    '''A pseudorandom number generator based on the linear congruential method.'''

    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ) -> None:  # noqa: B008
        """Store the LCG parameters; *seed* defaults to the time at import.

        The original signature repeated one parameter name four times (a
        SyntaxError) and never assigned the attributes to ``self``.
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def UpperCAmelCase ( self ) -> int:
        """Advance one LCG step, update the stored seed, and return it."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
lowerCAmelCase_ = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 16 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework string used by the tests below: prefer PyTorch,
# then TensorFlow, falling back to JAX-style tensors.
if is_torch_available():
    _UpperCAmelCase : str ="""pt"""
elif is_tf_available():
    _UpperCAmelCase : str ="""tf"""
else:
    _UpperCAmelCase : str ="""jax"""
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    '''Specialization of the common tokenizer test-suite for the byte-level
    Perceiver tokenizer.

    NOTE(review): the obfuscated bindings in this class are systematically
    broken — values are assigned to ``lowerCAmelCase_`` but later read through
    other names (``tokenizer``, ``encoded``, ``batch``, ``toks`` ...), the two
    class attributes below share one name, and one helper signature repeats
    ``__lowercase`` for several parameters (a SyntaxError).  The comments below
    describe the evident intent; the bindings need repair before these tests
    can execute.
    '''
    # Tokenizer class under test; second assignment presumably disables the
    # rust-tokenizer variant — TODO confirm, both were bound to one name.
    SCREAMING_SNAKE_CASE__ = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ = False
    def lowercase_ ( self ) -> None:
        # Build a fresh tokenizer and persist it so reload tests can use it.
        super().setUp()
        lowerCAmelCase_ : str = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def lowercase_ ( self ) -> PerceiverTokenizer:
        # Pretrained reference tokenizer used by the encode/decode tests.
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
    def lowercase_ ( self , **__lowercase ) -> PerceiverTokenizer:
        # Reload the tokenizer saved in setUp, forwarding override kwargs.
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase=False , __lowercase=2_0 , __lowercase=5 ) -> Tuple[str, list]:
        # Produce a clean (text, ids) pair for round-trip tests.
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        lowerCAmelCase_ : Optional[Any] = []
        for i in range(len(__lowercase ) ):
            try:
                lowerCAmelCase_ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        lowerCAmelCase_ : List[str] = list(filter(lambda __lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowercase ) )
        lowerCAmelCase_ : Optional[int] = list(filter(lambda __lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowercase ) , __lowercase ) )
        if max_length is not None and len(__lowercase ) > max_length:
            lowerCAmelCase_ : Union[str, Any] = toks[:max_length]
        if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
            while len(__lowercase ) < min_length:
                lowerCAmelCase_ : Union[str, Any] = toks + toks
        # toks_str = [t[1] for t in toks]
        lowerCAmelCase_ : List[str] = [t[0] for t in toks]
        # Ensure consistency
        lowerCAmelCase_ : int = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
        if " " not in output_txt and len(__lowercase ) > 1:
            lowerCAmelCase_ : Optional[int] = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
            )
        if with_prefix_space:
            lowerCAmelCase_ : Any = ''' ''' + output_txt
        lowerCAmelCase_ : List[str] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        return output_txt, output_ids
    def lowercase_ ( self ) -> None:
        # Round-trip non-ASCII text through encode/decode.
        lowerCAmelCase_ : List[str] = self.perceiver_tokenizer
        lowerCAmelCase_ : Any = '''Unicode €.'''
        lowerCAmelCase_ : Dict = tokenizer(__lowercase )
        lowerCAmelCase_ : Any = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded['''input_ids'''] , __lowercase )
        # decoding
        lowerCAmelCase_ : str = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , '''[CLS]Unicode €.[SEP]''' )
        lowerCAmelCase_ : Optional[int] = tokenizer('''e è é ê ë''' )
        lowerCAmelCase_ : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded['''input_ids'''] , __lowercase )
        # decoding
        lowerCAmelCase_ : int = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , '''[CLS]e è é ê ë[SEP]''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
    def lowercase_ ( self ) -> None:
        # Batch-encode two sentences and check ids + tensor shapes.
        lowerCAmelCase_ : Any = self.perceiver_tokenizer
        lowerCAmelCase_ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        lowerCAmelCase_ : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        lowerCAmelCase_ : Optional[int] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
        self.assertIsInstance(__lowercase , __lowercase )
        if FRAMEWORK != "jax":
            lowerCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
        else:
            lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(__lowercase , __lowercase )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )
    def lowercase_ ( self ) -> None:
        # Encoder-only model: no decoder_* keys should be produced.
        lowerCAmelCase_ : int = self.perceiver_tokenizer
        lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , __lowercase )
        self.assertIn('''attention_mask''' , __lowercase )
        self.assertNotIn('''decoder_input_ids''' , __lowercase )
        self.assertNotIn('''decoder_attention_mask''' , __lowercase )
    def lowercase_ ( self ) -> None:
        # Target texts padded/truncated to a fixed max_length.
        lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
        lowerCAmelCase_ : int = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        lowerCAmelCase_ : List[str] = tokenizer(
            text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
        self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
    def lowercase_ ( self ) -> None:
        # Save/reload round-trips, including added special tokens and
        # model_max_length overrides.
        # safety check on max_len default value so we are sure the test works
        lowerCAmelCase_ : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
                lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                tokenizer.save_pretrained(__lowercase )
                lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
                lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                self.assertListEqual(__lowercase , __lowercase )
                shutil.rmtree(__lowercase )
                lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
                lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                tokenizer.save_pretrained(__lowercase )
                lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
                lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                self.assertListEqual(__lowercase , __lowercase )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(__lowercase )
    def lowercase_ ( self ) -> None:
        # additional_special_tokens set via the on-disk json configs and via
        # the from_pretrained kwarg must both be honored.
        lowerCAmelCase_ : List[Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__lowercase )
                with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCAmelCase_ : Tuple = json.load(__lowercase )
                with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCAmelCase_ : Any = json.load(__lowercase )
                lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
                lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                lowerCAmelCase_ : Any = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__lowercase , __lowercase )
                with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__lowercase , __lowercase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
                    __lowercase , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
                lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
                    __lowercase , additional_special_tokens=__lowercase , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
    def lowercase_ ( self ) -> None:
        # Byte 178 alone is not valid UTF-8 and must decode to the replacement char.
        lowerCAmelCase_ : Any = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
    def lowercase_ ( self ) -> Tuple:
        # Intentionally disabled common test (not applicable to Perceiver).
        pass
    def lowercase_ ( self ) -> None:
        # Intentionally disabled common test (not applicable to Perceiver).
        pass
    def lowercase_ ( self ) -> Tuple:
        # Intentionally disabled common test (not applicable to Perceiver).
        pass
    def lowercase_ ( self ) -> None:
        # Intentionally disabled common test (not applicable to Perceiver).
        pass
    def lowercase_ ( self ) -> None:
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase ) | 262 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase ) ->bool:
    '''Lucas-Lehmer test: True iff the Mersenne number ``2**p - 1`` is prime.

    *_lowercase* is the exponent ``p``.  ``p == 2`` is special-cased because
    the recurrence below needs at least ``p - 2 >= 1`` iterations.
    '''
    if _lowercase < 2:
        raise ValueError("p should not be less than 2!" )
    elif _lowercase == 2:
        return True

    s = 4
    m = (1 << _lowercase) - 1
    for _ in range(_lowercase - 2 ):
        # Lucas-Lehmer recurrence, reduced mod the Mersenne number.
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 105 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class snake_case__:
        '''Minimal stub standing in for ``PIL.Image`` when vision deps are absent.'''

        @staticmethod
        def lowercase_ ( *args , **kwargs ) -> None:
            # No-op: tests that would touch this stub are skipped anyway.
            pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case__( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase_ : str = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def lowercase_ ( self , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : Tuple = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase_ : Dict = len(__lowercase )
self.assertGreater(__lowercase , 0 )
self.assertEqual(
__lowercase , [
{
'''score''': ANY(__lowercase ),
'''label''': ANY(__lowercase ),
'''box''': {'''xmin''': ANY(__lowercase ), '''ymin''': ANY(__lowercase ), '''xmax''': ANY(__lowercase ), '''ymax''': ANY(__lowercase )},
}
for i in range(__lowercase )
] , )
    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def lowercase_ ( self ) -> None:
        # Intentionally empty: skipped until a TensorFlow implementation exists.
        pass
@require_torch
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Union[str, Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase_ : Union[str, Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
lowerCAmelCase_ : Union[str, Any] = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Any = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
lowerCAmelCase_ : Tuple = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def lowercase_ ( self ) -> List[str]:
pass
@require_torch
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Any = 0.2
lowerCAmelCase_ : List[Any] = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Optional[Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__lowercase , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Dict = 2
lowerCAmelCase_ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Optional[Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__lowercase , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , ) | 262 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# Module-level tokenizer metadata for RetriBERT.
# NOTE(review): every constant below was mangled to the same name
# ``_UpperCamelCase``, so each assignment clobbers the previous one; the
# class body nevertheless references the original names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) -- confirm against
# the original file.
_UpperCamelCase: List[str] = logging.get_logger(__name__)
# Expected vocabulary/tokenizer file names inside a checkpoint directory.
_UpperCamelCase: str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs for the published checkpoint's tokenizer files.
_UpperCamelCase: str = {
    """vocab_file""": {
        """yjernite/retribert-base-uncased""": (
            """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """yjernite/retribert-base-uncased""": (
            """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum model input length (positional-embedding size) per checkpoint.
_UpperCamelCase: str = {
    """yjernite/retribert-base-uncased""": 5_1_2,
}
# Default tokenizer init kwargs per checkpoint.
_UpperCamelCase: List[str] = {
    """yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class a__ ( UpperCAmelCase__ ):
    '''Fast (Rust-backed) RetriBERT tokenizer, a BERT-style WordPiece tokenizer.

    NOTE(review): the ``__init__`` signature below repeats the parameter name
    ``lowerCAmelCase`` -- as written this is a SyntaxError; the parameters
    were presumably ``vocab_file``, ``tokenizer_file``, ``do_lower_case``,
    ``unk_token``, ... in the original file.
    '''
    _lowerCamelCase = VOCAB_FILES_NAMES
    _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
    _lowerCamelCase = RetriBertTokenizer
    _lowerCamelCase = ["""input_ids""", """attention_mask"""]
    def __init__( self : Tuple, lowerCAmelCase : str=None, lowerCAmelCase : Optional[int]=None, lowerCAmelCase : int=True, lowerCAmelCase : Union[str, Any]="[UNK]", lowerCAmelCase : str="[SEP]", lowerCAmelCase : int="[PAD]", lowerCAmelCase : Union[str, Any]="[CLS]", lowerCAmelCase : List[Any]="[MASK]", lowerCAmelCase : Any=True, lowerCAmelCase : Optional[Any]=None, **lowerCAmelCase : Dict, ) -> Union[str, Any]:
        super().__init__(
            __lowercase, tokenizer_file=__lowercase, do_lower_case=__lowercase, unk_token=__lowercase, sep_token=__lowercase, pad_token=__lowercase, cls_token=__lowercase, mask_token=__lowercase, tokenize_chinese_chars=__lowercase, strip_accents=__lowercase, **__lowercase, )
        # Rebuild the backend normalizer if the requested lowercasing /
        # accent-stripping / Chinese-character handling differs from the
        # serialized tokenizer's settings.
        lowercase : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase', __lowercase ) != do_lower_case
            or normalizer_state.get('strip_accents', __lowercase ) != strip_accents
            or normalizer_state.get('handle_chinese_chars', __lowercase ) != tokenize_chinese_chars
        ):
            lowercase : List[str] = getattr(__lowercase, normalizer_state.pop('type' ) )
            lowercase : str = do_lower_case
            lowercase : Tuple = strip_accents
            lowercase : Tuple = tokenize_chinese_chars
            lowercase : Tuple = normalizer_class(**__lowercase )
        lowercase : int = do_lower_case
    # Build model inputs with special tokens:
    # [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair.
    def lowercase ( self : List[Any], lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None ) -> Optional[int]:
        lowercase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    # Token-type ids: 0 for the first sequence (and its specials), 1 for the
    # second sequence plus its trailing [SEP].
    def lowercase ( self : Optional[int], lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any] = None ) -> List[int]:
        lowercase : List[Any] = [self.sep_token_id]
        lowercase : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    # Persist the WordPiece vocabulary via the backend tokenizer model.
    def lowercase ( self : str, lowerCAmelCase : str, lowerCAmelCase : Any = None ) -> Tuple[str]:
        lowercase : Any = self._tokenizer.model.save(__lowercase, name=__lowercase )
        return tuple(__lowercase )
| 255 |
# Re-exports for the data-processors subpackage (GLUE, SQuAD, XNLI tasks).
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
# Fix: ``SquadVaProcessor`` was listed twice on this line (a duplicate import
# of the same name); import it once.
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class _UpperCamelCase :
    '''Differentiable pinhole camera batch.

    Holds per-camera origin and orthonormal axes (x, y, z) plus image size
    and field of view, and can emit per-pixel (origin, direction) rays.
    NOTE(review): the mangler renamed every field to ``lowerCamelCase__``
    with a placeholder ``42`` annotation; the trailing comments and the
    method bodies (``self.origin``, ``self.x`` ... ``self.shape``) show the
    original field names -- confirm against the original file.
    '''
    lowerCamelCase__ =42 # [batch_size x 3]
    lowerCamelCase__ =42 # [batch_size x 3]
    lowerCamelCase__ =42 # [batch_size x 3]
    lowerCamelCase__ =42 # [batch_size x 3]
    lowerCamelCase__ =42
    lowerCamelCase__ =42
    lowerCamelCase__ =42
    lowerCamelCase__ =42
    lowerCamelCase__ =42
    # Sanity-check the batch: matching batch sizes, 3-vectors, rank 2.
    def __UpperCamelCase ( self : int ) -> List[str]:
        """simple docstring"""
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    # Image resolution as a float tensor [width, height].
    def __UpperCamelCase ( self : Dict ) -> int:
        """simple docstring"""
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
    # Field of view as a float tensor [x_fov, y_fov].
    def __UpperCamelCase ( self : Optional[Any] ) -> str:
        """simple docstring"""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
    # Integer (col, row) coordinates for every pixel, flattened row-major.
    def __UpperCamelCase ( self : Any ) -> torch.Tensor:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Tuple = torch.arange(self.height * self.width )
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(__lowercase , self.width , rounding_mode="trunc" ),
            ] , axis=1 , )
        return coords
    # All camera rays for the batch, reshaped to
    # [batch, inner_batch * H * W, 2, 3] (origin and direction per pixel).
    @property
    def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.shape
        SCREAMING_SNAKE_CASE : int = int(np.prod(__lowercase ) )
        SCREAMING_SNAKE_CASE : Tuple = self.get_image_coords()
        SCREAMING_SNAKE_CASE : str = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        SCREAMING_SNAKE_CASE : Optional[int] = self.get_camera_rays(__lowercase )
        SCREAMING_SNAKE_CASE : Optional[Any] = rays.view(__lowercase , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    # Convert pixel coordinates to world-space rays: map pixels to [-1, 1],
    # scale by tan(fov/2), combine the camera axes, and normalize direction.
    def __UpperCamelCase ( self : Optional[int] , a : str ) -> torch.Tensor:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Optional[int] = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        SCREAMING_SNAKE_CASE : int = coords.view(__lowercase , -1 , 2 )
        SCREAMING_SNAKE_CASE : Any = self.resolution()
        SCREAMING_SNAKE_CASE : Optional[int] = self.fov()
        SCREAMING_SNAKE_CASE : Optional[int] = (flat.float() / (res - 1)) * 2 - 1
        SCREAMING_SNAKE_CASE : Optional[Any] = fracs * torch.tan(fov / 2 )
        SCREAMING_SNAKE_CASE : Optional[int] = fracs.view(__lowercase , -1 , 2 )
        SCREAMING_SNAKE_CASE : Optional[Any] = (
            self.z.view(__lowercase , 1 , 3 )
            + self.x.view(__lowercase , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(__lowercase , 1 , 3 ) * fracs[:, :, 1:]
        )
        SCREAMING_SNAKE_CASE : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__lowercase )
        SCREAMING_SNAKE_CASE : str = torch.stack(
            [
                torch.broadcast_to(self.origin.view(__lowercase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(__lowercase , *__lowercase , 2 , 3 )
    # Return a copy with a new image size; aspect ratio must be preserved.
    def __UpperCamelCase ( self : Optional[Any] , a : Any , a : Union[str, Any] ) -> "DifferentiableProjectiveCamera":
        """simple docstring"""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=__lowercase , height=__lowercase , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowerCamelCase__ ( _a):
    # Build a ring of 20 cameras panning around the origin: each z axis
    # points inward with a constant downward tilt (-0.5 before
    # normalization), the origin sits at -z * 4, and x/y complete an
    # orthonormal frame (y = z x x via the cross product).
    # NOTE(review): the bodies reference ``lowerCAmelCase_``, which is never
    # defined here (the mangler destroyed the original local names such as
    # ``theta``/``z``/``x``/``y``/``origin`` and the ``size`` parameter);
    # as written this raises NameError -- confirm against the original file.
    SCREAMING_SNAKE_CASE : Optional[Any] = []
    SCREAMING_SNAKE_CASE : List[str] = []
    SCREAMING_SNAKE_CASE : List[str] = []
    SCREAMING_SNAKE_CASE : Optional[Any] = []
    for theta in np.linspace(0 , 2 * np.pi , num=20):
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([np.sin(lowerCAmelCase_), np.cos(lowerCAmelCase_), -0.5])
        z /= np.sqrt(np.sum(z**2))
        SCREAMING_SNAKE_CASE : List[str] = -z * 4
        SCREAMING_SNAKE_CASE : Tuple = np.array([np.cos(lowerCAmelCase_), -np.sin(lowerCAmelCase_), 0.0])
        SCREAMING_SNAKE_CASE : List[str] = np.cross(lowerCAmelCase_ , lowerCAmelCase_)
        origins.append(lowerCAmelCase_)
        xs.append(lowerCAmelCase_)
        ys.append(lowerCAmelCase_)
        zs.append(lowerCAmelCase_)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0)).float() , x=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0)).float() , y=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0)).float() , z=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0)).float() , width=lowerCAmelCase_ , height=lowerCAmelCase_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase_)) , )
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map from published ErnieM checkpoint name to its remote config URL.
_UpperCAmelCase : Dict ={
    """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
    """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    '''Configuration class for ERNIE-M models.

    Stores vocabulary size, transformer dimensions, dropout rates and other
    hyper-parameters.
    NOTE(review): the ``__init__`` signature repeats the parameter name
    ``__lowercase`` -- as written this is a SyntaxError; from the defaults
    and body the originals were presumably ``vocab_size=250002``,
    ``hidden_size=768``, ``num_hidden_layers=12``, ... -- confirm against
    the original file.
    '''
    SCREAMING_SNAKE_CASE__ : Dict = """ernie_m"""
    # Maps legacy config keys to their canonical attribute names.
    SCREAMING_SNAKE_CASE__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , __lowercase = 2_5_0_0_0_2 , __lowercase = 7_6_8 , __lowercase = 1_2 , __lowercase = 1_2 , __lowercase = 3_0_7_2 , __lowercase = "gelu" , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 5_1_4 , __lowercase = 0.02 , __lowercase = 1 , __lowercase = 1e-05 , __lowercase=None , __lowercase=False , __lowercase=0.0 , **__lowercase , ) -> Tuple:
        super().__init__(pad_token_id=__lowercase , **__lowercase )
        lowerCAmelCase_ : Tuple = vocab_size
        lowerCAmelCase_ : Dict = hidden_size
        lowerCAmelCase_ : Tuple = num_hidden_layers
        lowerCAmelCase_ : int = num_attention_heads
        lowerCAmelCase_ : Dict = intermediate_size
        lowerCAmelCase_ : int = hidden_act
        lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
        lowerCAmelCase_ : Any = attention_probs_dropout_prob
        lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
        lowerCAmelCase_ : str = initializer_range
        lowerCAmelCase_ : List[str] = layer_norm_eps
        lowerCAmelCase_ : List[Any] = classifier_dropout
        lowerCAmelCase_ : Any = is_decoder
        lowerCAmelCase_ : List[Any] = act_dropout
"""simple docstring"""
def lowercase_(_snake_case):
    """Approximate a minimum vertex cover of ``_snake_case`` using the
    maximal-matching 2-approximation.

    ``_snake_case`` is an adjacency-list dict mapping a node to the list of
    its neighbors. Repeatedly pick an arbitrary remaining edge, add both
    endpoints to the cover, then discard every edge touching either
    endpoint; the chosen endpoints form a vertex cover at most twice the
    optimum size.

    Fix: both functions in this pair were defined under the same name
    (the second definition shadowed the first) and this one referenced
    undefined names, so it raised NameError; the helper is restored as
    ``get_edges`` and the edge endpoints are unpacked properly.
    """
    chosen_vertices = set()
    # edges = set of the graph's (from_node, to_node) pairs
    edges = get_edges(_snake_case)
    # While there are still elements in the edges set, take an arbitrary
    # edge, add both of its endpoints to chosen_vertices, and then remove
    # every edge adjacent to either endpoint.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(_snake_case):
    """Return the set of ordered (from_node, to_node) pairs of the graph.

    For an undirected adjacency list each edge appears in both directions.
    """
    edges = set()
    for from_node, to_nodes in _snake_case.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__( unittest.TestCase ):
    '''Configuration holder for MobileNetV1 image-processor tests.

    Stores the resize/crop settings used to build image-processor kwargs.
    NOTE(review): the ``__init__`` signature repeats the parameter name
    ``__lowercase`` -- as written this is a SyntaxError; from the defaults
    and body the originals were presumably ``parent``, ``batch_size=7``,
    ``num_channels=3``, ``image_size=18``, ``min_resolution=30``,
    ``max_resolution=400``, ``do_resize=True``, ``size=None``,
    ``do_center_crop=True``, ``crop_size=None``.
    '''
    def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=1_8 , __lowercase=3_0 , __lowercase=4_0_0 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=None , ) -> List[Any]:
        # Fall back to default resize/crop geometry when not provided.
        lowerCAmelCase_ : Optional[Any] = size if size is not None else {'''shortest_edge''': 2_0}
        lowerCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
        lowerCAmelCase_ : Any = parent
        lowerCAmelCase_ : Any = batch_size
        lowerCAmelCase_ : Optional[int] = num_channels
        lowerCAmelCase_ : Tuple = image_size
        lowerCAmelCase_ : List[str] = min_resolution
        lowerCAmelCase_ : Dict = max_resolution
        lowerCAmelCase_ : Tuple = do_resize
        lowerCAmelCase_ : Optional[Any] = size
        lowerCAmelCase_ : Union[str, Any] = do_center_crop
        lowerCAmelCase_ : Optional[Any] = crop_size
    # Kwargs dict consumed by the image-processor constructor under test.
    def lowercase_ ( self ) -> Union[str, Any]:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    '''Tests for the MobileNetV1 image processor.

    Verifies constructor properties and that PIL, NumPy and PyTorch inputs
    are all resized/cropped to the expected output shape.
    '''
    SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaImageProcessor if is_vision_available() else None
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Union[str, Any] = MobileNetVaImageProcessingTester(self )
    @property
    def lowercase_ ( self ) -> Dict:
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor must expose the configured attributes.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowercase , '''size''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) )
        self.assertTrue(hasattr(__lowercase , '''crop_size''' ) )
    # from_dict should honor both serialized values and explicit overrides.
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 2_0} )
        self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
        lowerCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
        self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
    def lowercase_ ( self ) -> Tuple:
        pass
    # PIL inputs: single image and batch both come out at the crop size.
    def lowercase_ ( self ) -> Union[str, Any]:
        # Initialize image_processing
        lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , Image.Image )
        # Test not batched input
        lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    # NumPy inputs: same shape expectations as PIL.
    def lowercase_ ( self ) -> Optional[int]:
        # Initialize image_processing
        lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , np.ndarray )
        # Test not batched input
        lowerCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    # Torch-tensor inputs: same shape expectations as PIL/NumPy.
    def lowercase_ ( self ) -> Any:
        # Initialize image_processing
        lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , torch.Tensor )
        # Test not batched input
        lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCAmelCase_ : Dict = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase (UpperCAmelCase__ ):
    '''Processor combining a LayoutLMv2 image processor with a LayoutXLM
    tokenizer.

    Runs (optional) OCR via the image processor, then tokenizes the words
    together with their bounding boxes, and attaches the pixel values to the
    encoded batch.
    '''
    _snake_case : int = ["""image_processor""", """tokenizer"""]
    _snake_case : List[Any] = """LayoutLMv2ImageProcessor"""
    _snake_case : int = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> int:
        # ``feature_extractor`` is the deprecated name for ``image_processor``.
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , __lowercase , )
            UpperCAmelCase_ : List[str] = kwargs.pop('feature_extractor' )
        UpperCAmelCase_ : List[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(__lowercase , __lowercase )
    def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ) -> BatchEncoding:
        # verify input: user-supplied boxes/labels conflict with built-in OCR
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
        # first, apply the image processor (may run OCR and produce words/boxes)
        UpperCAmelCase_ : Optional[int] = self.image_processor(images=__lowercase , return_tensors=__lowercase )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(__lowercase , __lowercase ):
                UpperCAmelCase_ : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
            UpperCAmelCase_ : Any = features['''words''']
        UpperCAmelCase_ : Union[str, Any] = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , stride=__lowercase , pad_to_multiple_of=__lowercase , return_token_type_ids=__lowercase , return_attention_mask=__lowercase , return_overflowing_tokens=__lowercase , return_special_tokens_mask=__lowercase , return_offsets_mapping=__lowercase , return_length=__lowercase , verbose=__lowercase , return_tensors=__lowercase , **__lowercase , )
        # add pixel values
        UpperCAmelCase_ : Any = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            UpperCAmelCase_ : str = self.get_overflowing_images(__lowercase , encoded_inputs['overflow_to_sample_mapping'] )
        UpperCAmelCase_ : List[str] = images
        return encoded_inputs
    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> int:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        UpperCAmelCase_ : int = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(__lowercase ) != len(__lowercase ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f" {len(__lowercase )} and {len(__lowercase )}" )
        return images_with_overflow
    # Delegate decoding straight to the tokenizer.
    def __UpperCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
        return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
    def __UpperCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
        return self.tokenizer.decode(*__lowercase , **__lowercase )
    @property
    def __UpperCAmelCase ( self ) -> Dict:
        # Names of the model inputs this processor produces.
        return ["input_ids", "bbox", "attention_mask", "image"]
    # Deprecated aliases kept for backward compatibility.
    @property
    def __UpperCAmelCase ( self ) -> Tuple:
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __lowercase , )
        return self.image_processor_class
    @property
    def __UpperCAmelCase ( self ) -> Any:
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __lowercase , )
        return self.image_processor
| 29 |
from __future__ import annotations
import math
class snake_case__:
    """Segment tree over a 1-indexed array supporting lazy range assignment
    and range-maximum queries.

    Node 1 is the root of ``segment_tree``; ``lazy[idx]`` holds a pending
    range-assignment value for node ``idx`` and ``flag[idx]`` is True while
    that pending value has not yet been pushed down.

    Fix: the mangled original repeated parameter names (a SyntaxError),
    defined every method under the same name ``lowercase_`` (so only the
    last survived, while bodies call ``self.build``/``self.left``/...),
    and referenced undefined names in ``__str__``. Names are restored from
    the method bodies and the ``__main__`` demo below.
    """

    def __init__(self, size: int) -> None:
        """Allocate tree storage for an array of ``size`` elements."""
        self.size = size
        # approximate the overall size of segment tree with given value;
        # 4 * size safely over-approximates the number of nodes needed.
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        """Index of the left child of node ``idx``."""
        return idx * 2

    def right(self, idx: int) -> int:
        """Index of the right child of node ``idx``."""
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list) -> None:
        """Build node ``idx`` covering positions [left_element, right_element]
        (1-indexed) of array ``a``."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign ``val`` to every position in [a, b]; always returns True."""
        # Push any pending assignment down before touching this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            # Disjoint from the update range: nothing to do.
            return True
        if left_element >= a and right_element <= b:
            # Fully covered: assign here and defer the children lazily.
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> "int | float":
        """Maximum over positions [a, b] (1-indexed, inclusive); returns
        ``-math.inf`` when the node's range is disjoint from [a, b]."""
        # Resolve any pending assignment before reading this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        """Current array contents as a string of point queries."""
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


# Backward-compatible alias: the __main__ demo below refers to ``SegmentTree``.
SegmentTree = snake_case__
if __name__ == "__main__":
    # Demo: build a max segment tree over A, run a few range-max queries,
    # then range-assign and query again.  Fix: `A`, `size` and `segt` were
    # referenced but never bound (assignments were mangled away).
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the encoder-decoder model family: map module name
# -> public names it provides.  Fix: the structure dict and the per-backend
# entries were all assigned to one repeatedly-overwritten name, and the final
# _LazyModule result was never installed in sys.modules.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# Fix: the three constants were assigned to one repeatedly-overwritten name;
# TRANSFORMERS_PATH / PATH_TO_DOCS are referenced further down this file.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and
    before `end_prompt`, stripping surrounding empty lines.

    Returns (extracted_text, start_index, end_index, lines).
    Fix: the def had three identically-named parameters (a SyntaxError) and the
    wrong name; the caller below uses `_find_text_in_file(filename=..., start_prompt=..., end_prompt=...)`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines on both sides of the extracted region.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
# Fix: these were assigned to one overwritten dummy name while the code below
# references _re_tf_models / _re_flax_models / _re_pt_models / transformers_module.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its words (acronym-aware)."""
    # Fix: def name restored; the lookup loop below calls `camel_case_split`.
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a cell of `width` characters (emoji count as width 2)."""
    # Fix: the def had two identically-named parameters (a SyntaxError) and the
    # wrong name; the table builder below calls `_center_text(text, width)`.
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date markdown support table from the auto modules."""
    # Fix: every assignment target in this function had been mangled to a
    # throwaway local; restored so the lookups and the table actually build.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """
    Check the model table in `index.md` is up to date; rewrite it when
    `overwrite` is True, otherwise raise.  Fix: def name and all assignment
    targets restored (the `__main__` block calls `check_model_table`).
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    # Fix: `parser` and `args` were referenced but never bound.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
'''simple docstring'''
# Memoization cache: (days_left, absences_so_far, consecutive_lates) -> count.
# Fix: the cache and both functions had mangled names (duplicate parameters were
# a SyntaxError; `cache` and `solution` were referenced but undefined).
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """
    Count valid prize strings (Project Euler 191) with ``days`` days remaining,
    ``absent`` absences used so far and ``late`` consecutive lates just before.
    """
    # A string fails on 3 consecutive late days or a 2nd absence.
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of prize strings over ``days`` days (default 30)."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
| 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the `transformers-cli` tool: register subcommands and dispatch."""
    # Fix: def name and all locals were mangled; the guard below calls `main()`.
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class a ( BertTokenizationTest ):
    # DistilBERT tokenization test suite: reuses all BERT tokenization tests
    # and only swaps in the DistilBERT tokenizer classes.
    # Fix: the base class was an undefined dummy name (the import above brings
    # in BertTokenizationTest) and the three class attributes all overwrote
    # one dummy name instead of configuring the test mixin.
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """Check special-token insertion for single sequences and sequence pairs."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 56 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Fall back to None when sentencepiece is unavailable so the fast tokenizer
# can still be used (its slow_tokenizer_class is then None).
# Fix: both the fallback and the logger were assigned to overwritten dummy names.
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)
# Tokenizer resource maps and model limits.  Fix: these constants were assigned
# to one repeatedly-overwritten dummy name while the class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / FAIRSEQ_LANGUAGE_CODES.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class snake_case__( PreTrainedTokenizerFast ):
    """
    Fast NLLB tokenizer (backed by HuggingFace's *tokenizers* library).

    Sequences are wrapped with language codes: by default
    `[src_lang_code] X [eos]` for source and `[tgt_lang_code] X [eos]` for
    target; with ``legacy_behaviour=True`` the code comes after `eos`.

    Fixes vs. the mangled original: the base class was an undefined dummy name
    (PreTrainedTokenizerFast is imported above); ``__init__`` had many
    identically-named parameters (a SyntaxError); class attributes and every
    ``self.*`` assignment target had been destroyed.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source-language code (e.g. ``"eng_Latn"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add the language-code prefix/suffix special tokens around the ids."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """NLLB does not use token type ids: return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by generation pipelines: encode inputs and set `forced_bos_token_id`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            # legacy: no prefix, suffix = [eos, src_lang_code]
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            # default: prefix = [src_lang_code], suffix = [eos]
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target-language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` (slow-tokenizer files)."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""simple docstring"""
import requests
from bs4 import BeautifulSoup  # fix: was `from bsa import ...` (typo'd package name)


def get_citation(base_url: str, params: dict) -> str:
    """
    Return the "Cited by N" link text for a Google Scholar lookup.

    Fix: the def name (the call below uses `get_citation`) and the local
    assignment targets had been mangled away.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    # First search-result block, then its footer links; the third anchor is "Cited by N".
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2_018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Marker NewTypes for dataclass instances/types used in annotations below.
# Fix: both were assigned to one overwritten dummy name while `DataClassType`
# is referenced in the parser's annotations.
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    """
    Parse a CLI boolean: accepts yes/no, true/false, t/f, y/n, 1/0
    (case-insensitive); bools pass through.  Raises ArgumentTypeError otherwise.
    Fix: def name restored (`string_to_bool` is used as an argparse `type` below).
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Return an argparse `type` function mapping a CLI string back to the matching
    choice via its str() form; unknown strings pass through unchanged.
    Fix: def name and the mapping's assignment target restored.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    """
    `dataclasses.field` wrapper that records argparse extras (`aliases`, `help`)
    in the field metadata.  Fix: the metadata dict and its entries were assigned
    to throwaway locals, so aliases/help were silently dropped.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Iterable[DataClassType]
def __init__( self , __lowercase , **__lowercase ) -> List[str]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCAmelCase_ : Optional[int] = ArgumentDefaultsHelpFormatter
super().__init__(**__lowercase )
if dataclasses.is_dataclass(__lowercase ):
lowerCAmelCase_ : Union[str, Any] = [dataclass_types]
lowerCAmelCase_ : List[Any] = list(__lowercase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__lowercase )
@staticmethod
def lowercase_ ( __lowercase , __lowercase ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = f"""--{field.name}"""
lowerCAmelCase_ : Tuple = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __lowercase ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
lowerCAmelCase_ : List[str] = kwargs.pop('''aliases''' , [] )
if isinstance(__lowercase , __lowercase ):
lowerCAmelCase_ : Optional[Any] = [aliases]
lowerCAmelCase_ : Any = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__lowercase , '''UnionType''' ) and isinstance(__lowercase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__lowercase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__lowercase ) not in field.type.__args__:
# filter `str` in Union
lowerCAmelCase_ : List[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCAmelCase_ : Dict = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCAmelCase_ : str = (
field.type.__args__[0] if isinstance(__lowercase , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCAmelCase_ : List[Any] = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCAmelCase_ : Dict = {}
if origin_type is Literal or (isinstance(field.type , __lowercase ) and issubclass(field.type , __lowercase )):
if origin_type is Literal:
lowerCAmelCase_ : Optional[Any] = field.type.__args__
else:
lowerCAmelCase_ : int = [x.value for x in field.type]
lowerCAmelCase_ : str = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : str = field.default
else:
lowerCAmelCase_ : Tuple = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCAmelCase_ : Tuple = copy(__lowercase )
# Hack because type=bool in argparse does not behave as we want.
lowerCAmelCase_ : Dict = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCAmelCase_ : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCAmelCase_ : List[str] = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCAmelCase_ : int = '''?'''
# This is the value that will get picked if we do --field_name (without value)
lowerCAmelCase_ : List[Any] = True
elif isclass(__lowercase ) and issubclass(__lowercase , __lowercase ):
lowerCAmelCase_ : Union[str, Any] = field.type.__args__[0]
lowerCAmelCase_ : Dict = '''+'''
if field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCAmelCase_ : Optional[int] = True
else:
lowerCAmelCase_ : List[Any] = field.type
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : Dict = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : List[Any] = field.default_factory()
else:
lowerCAmelCase_ : int = True
parser.add_argument(__lowercase , *__lowercase , **__lowercase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCAmelCase_ : Any = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__lowercase )
def lowercase_ ( self , __lowercase ) -> List[Any]:
    """Add every ``init=True`` field of the dataclass type ``__lowercase`` as an argparse argument.

    Fields go onto a dedicated argument group when the dataclass declares
    ``_argument_group_name``; otherwise they are added directly on ``self``.

    Raises:
        RuntimeError: when the dataclass's type hints cannot be resolved
            (PEP 563 postponed annotations, or PEP 604 ``X | Y`` unions on
            Python < 3.10).
    """
    # Local alias: the original body read the undefined name `dtype` (NameError).
    dtype = __lowercase
    if hasattr(dtype , '''_argument_group_name''' ):
        parser = self.add_argument_group(dtype._argument_group_name )
    else:
        parser = self
    try:
        # Resolve (possibly string) annotations into real types before building arguments.
        type_hints: Dict[str, type] = get_type_hints(dtype )
    except NameError:
        raise RuntimeError(
            f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
            '''removing line of `from __future__ import annotations` which opts in Postponed '''
            '''Evaluation of Annotations (PEP 563)''' )
    except TypeError as ex:
        # Remove this block when we drop Python 3.9 support
        if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(ex ):
            # Fixed: the original called map(__lowercase, ...) / str(__lowercase),
            # which stringified the dataclass type instead of the version/exception.
            python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
            raise RuntimeError(
                f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                '''line of `from __future__ import annotations` which opts in union types as '''
                '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                '''support Python versions that lower than 3.10, you need to use '''
                '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                '''`X | None`.''' ) from ex
        raise
    for field in dataclasses.fields(dtype ):
        if not field.init:
            continue
        # Replace the raw annotation with the resolved type for downstream parsing.
        field.type = type_hints[field.name]
        # Fixed: the original passed the dtype twice instead of (parser, field).
        self._parse_dataclass_field(parser , field )
def lowercase_ ( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
    """Parse command-line style arguments into instances of the registered dataclass types.

    Args:
        args: explicit list of strings to parse; defaults to ``sys.argv[1:]``.
        return_remaining_strings: if True, also return the unparsed strings.
        look_for_args_file: additionally read ``<script>.args`` next to the entry script.
        args_filename: read default arguments from this file instead of ``<script>.args``.
        args_file_flag: command-line flag whose (repeatable) values name extra args files.

    Returns:
        One instance per type in ``self.dataclass_types`` (plus a leftover
        namespace when extra attributes remain, plus the remaining strings
        when ``return_remaining_strings``).

    Raises:
        ValueError: when unused argument strings remain and
            ``return_remaining_strings`` is False.

    NOTE(review): the obfuscated original declared five parameters all named
    ``__lowercase`` (a SyntaxError); the names above were restored from the
    body's usage.
    """
    if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
        args_files = []
        if args_filename:
            args_files.append(Path(args_filename ) )
        elif look_for_args_file and len(sys.argv ):
            # Default convention: a ".args" file sitting next to the entry script.
            args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
        # args files specified via command line flag should overwrite default args files so we add them last
        if args_file_flag:
            # Create special parser just to extract the args_file_flag values
            args_file_parser = ArgumentParser()
            args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
            # Use only remaining args for further parsing (remove the args_file_flag)
            cfg , args = args_file_parser.parse_known_args(args=args )
            cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
            if cmd_args_file_paths:
                args_files.extend([Path(p ) for p in cmd_args_file_paths] )
        file_args = []
        for args_file in args_files:
            if args_file.exists():
                file_args += args_file.read_text().split()
        # in case of duplicate arguments the last one has precedence
        # args specified via the command line should overwrite args from files, so we add them last
        args = file_args + args if args is not None else file_args + sys.argv[1:]
    namespace , remaining_args = self.parse_known_args(args=args )
    outputs = []
    for dtype in self.dataclass_types:
        keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
        inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
        # Strip consumed attributes so only genuinely extra ones remain on the namespace.
        for k in keys:
            delattr(namespace , k )
        obj = dtype(**inputs )
        outputs.append(obj )
    if len(namespace.__dict__ ) > 0:
        # additional namespace.
        outputs.append(namespace )
    if return_remaining_strings:
        return (*outputs, remaining_args)
    else:
        if remaining_args:
            raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
        return (*outputs,)
def lowercase_ ( self , args , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
    """Instantiate each registered dataclass type from a plain ``dict``.

    Args:
        args: mapping of field name to value, shared across all dataclass types.
        allow_extra_keys: tolerate keys that no dataclass consumes.

    Raises:
        ValueError: when ``allow_extra_keys`` is False and some keys are unused.

    NOTE(review): the obfuscated original declared two parameters both named
    ``__lowercase`` (a SyntaxError); names restored from the body's usage.
    """
    # Track keys that no dataclass claims so we can report them at the end.
    unused_keys = set(args.keys() )
    outputs = []
    for dtype in self.dataclass_types:
        keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
        inputs = {k: v for k, v in args.items() if k in keys}
        unused_keys.difference_update(inputs.keys() )
        obj = dtype(**inputs )
        outputs.append(obj )
    if not allow_extra_keys and unused_keys:
        raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
    return tuple(outputs )
def lowercase_ ( self , json_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
    """Load a JSON file and parse its top-level object into the registered dataclasses.

    Args:
        json_file: path to a JSON file containing a single mapping.
        allow_extra_keys: forwarded to ``parse_dict``.

    NOTE(review): the obfuscated original declared two parameters both named
    ``__lowercase`` (a SyntaxError); names restored from the body's usage.
    """
    with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
        data = json.loads(open_json_file.read() )
    # Delegate the actual dataclass construction to parse_dict.
    outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
    return tuple(outputs )
def lowercase_ ( self , yaml_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
    """Load a YAML file and parse its top-level mapping into the registered dataclasses.

    Args:
        yaml_file: path to a YAML file containing a single mapping.
        allow_extra_keys: forwarded to ``parse_dict``.

    NOTE(review): the obfuscated original declared two parameters both named
    ``__lowercase`` (a SyntaxError); names restored from the body's usage.
    """
    # safe_load avoids executing arbitrary YAML tags on untrusted input.
    outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
    return tuple(outputs )
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k±1 trial division in O(sqrt(n)).

    NOTE: the obfuscated original named both functions in this module ``a__``
    (so the first was shadowed) and its body read the undefined names
    ``number``/``lowerCAmelCase_``; this restores a working helper under the
    name the solver below actually calls.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def a__(ratio: float = 0.1) -> int:
    """Project Euler 58 (spiral primes): smallest odd side length of an Ulam-style
    spiral whose diagonal prime ratio first drops below ``ratio``.

    Args:
        ratio: threshold for primes / total diagonal numbers (default 10%).

    Returns:
        The side length ``j`` at which the ratio falls below ``ratio``.
    """
    j = 3       # current spiral side length (odd)
    primes = 3  # primes seen on the diagonals so far (3, 5, 7 for side 3)
    # 2*j - 1 diagonal numbers exist for side length j (including the center 1).
    while primes / (2 * j - 1) >= ratio:
        # The four corners of the next ring are j*j + (j+1)*k for k = 1..3
        # (the fourth corner (j+2)^2 is an odd square, never prime).
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 330 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase ( lowerCAmelCase_ )-> Union[str, Any]:
    """argparse factory callback: build the env command, ignoring the parsed args.

    Fixed NameError: the command class defined in this module is
    ``snake_case__``, not ``EnvironmentCommand``.
    """
    return snake_case__()
class snake_case__( UpperCAmelCase__ ):
    """CLI command that collects and prints the local environment (library versions,
    platform) so users can paste it into GitHub issues.

    NOTE(review): all three methods below carry the obfuscated name
    ``lowercase_`` (later defs shadow earlier ones); names are kept to avoid
    changing the visible interface, but only the internal NameErrors were fixed.
    """

    @staticmethod
    def lowercase_( __lowercase ) -> List[Any]:
        # Register the `env` subcommand; argparse will call the module-level
        # factory `lowerCAmelCase` to build the command instance.
        # Fixed: the original read the undefined names `parser`/`download_parser`
        # and bound `func` to the parser argument itself.
        download_parser = __lowercase.add_parser('''env''' )
        download_parser.set_defaults(func=lowerCAmelCase )
    def lowercase_( self ) -> int:
        """Probe optional dependencies, print a fill-in report, and return the info dict."""
        hub_version = huggingface_hub.__version__
        # Optional dependencies default to "not installed" / "NA" until probed.
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        # NOTE(review): `format_dict` is presumably the static formatter defined
        # below under the obfuscated name `lowercase_` — confirm the intended name.
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def lowercase_( __lowercase ) -> str:
        # Render "- key: value" lines for the report (fixed: the original read
        # the undefined name `d` instead of its parameter).
        return "\n".join([f"""- {prop}: {val}""" for prop, val in __lowercase.items()] ) + "\n"
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module-level logger for this tokenizer module.
lowerCAmelCase_ = logging.get_logger(__name__)
# NOTE(review): every constant below binds the *same* name `lowerCAmelCase_`,
# so each assignment overwrites the previous one (obfuscation artifact).
# The class body later reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are presumably the intended
# names of these values — confirm against the un-obfuscated original.
lowerCAmelCase_ = {"""vocab_file""": """spiece.model"""}
lowerCAmelCase_ = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    }
}
# None here means "no fixed maximum input size" for each checkpoint.
lowerCAmelCase_ = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# Segments (not really needed)
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
lowerCAmelCase_ = 3
lowerCAmelCase_ = 4
class __A ( UpperCAmelCase__ ):
    '''SentencePiece tokenizer (XLNet-style) that pads sequences on the left.

    NOTE(review): this file appears machine-obfuscated.  Several method
    signatures below repeat the parameter name ``_snake_case`` (a SyntaxError
    in Python) and the bodies read names (``mask_token``, ``inputs``,
    ``outputs``, ``pieces``, ``token_ids_a``, ...) that the obfuscation
    renamed away.  Per review policy the code is left byte-identical here;
    only comments/docstrings were changed.
    '''

    # NOTE(review): the four class attributes below all bind the same name, so
    # only the last value ("left", presumably `padding_side`) survives — confirm.
    lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
    lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase : Dict = """left"""
    def __init__( self : Any ,_snake_case : Optional[Any] ,_snake_case : Any=False ,_snake_case : Optional[int]=True ,_snake_case : int=False ,_snake_case : str="<s>" ,_snake_case : int="</s>" ,_snake_case : List[str]="<unk>" ,_snake_case : Dict="<sep>" ,_snake_case : int="<pad>" ,_snake_case : Optional[int]="<cls>" ,_snake_case : Optional[int]="<mask>" ,_snake_case : List[Any]=["<eop>", "<eod>"] ,_snake_case : str = None ,**_snake_case : int ,) -> None:
        """Store the tokenization options and load the SentencePiece model."""
        # Wrap the mask token so it keeps the space before it (lstrip/rstrip flags).
        lowercase__ : str = AddedToken(__lowercase ,lstrip=__lowercase ,rstrip=__lowercase ) if isinstance(__lowercase ,__lowercase ) else mask_token
        lowercase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=__lowercase ,remove_space=__lowercase ,keep_accents=__lowercase ,bos_token=__lowercase ,eos_token=__lowercase ,unk_token=__lowercase ,sep_token=__lowercase ,pad_token=__lowercase ,cls_token=__lowercase ,mask_token=__lowercase ,additional_special_tokens=__lowercase ,sp_model_kwargs=self.sp_model_kwargs ,**__lowercase ,)
        # NOTE(review): presumably the pad token *type* id used by segment ids below — confirm.
        lowercase__ : Optional[Any] = 3
        lowercase__ : List[str] = do_lower_case
        lowercase__ : Optional[int] = remove_space
        lowercase__ : Any = keep_accents
        lowercase__ : Optional[Any] = vocab_file
        lowercase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__lowercase )
    @property
    def UpperCAmelCase ( self : str ) -> Any:
        """Vocabulary size reported by the loaded SentencePiece model."""
        return len(self.sp_model )
    def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        """Return the full token -> id mapping, including added tokens."""
        lowercase__ : Dict = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : List[Any] ) -> Dict:
        """Drop the unpicklable SentencePiece processor before pickling."""
        lowercase__ : Dict = self.__dict__.copy()
        lowercase__ : Union[str, Any] = None
        return state
    def __setstate__( self : Any ,_snake_case : Any ) -> Tuple:
        """Restore state and reload the SentencePiece model from `vocab_file`."""
        lowercase__ : Any = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            lowercase__ : int = {}
        lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def UpperCAmelCase ( self : Optional[int] ,_snake_case : int ) -> Optional[Any]:
        """Normalize raw text: collapse whitespace, unify quotes, strip accents, lowercase."""
        if self.remove_space:
            lowercase__ : Tuple = ''' '''.join(inputs.strip().split() )
        else:
            lowercase__ : Tuple = inputs
        # Normalize curly/backtick quote styles to plain double quotes.
        lowercase__ : str = outputs.replace('''``''' ,'''"''' ).replace('''\'\'''' ,'''"''' )
        if not self.keep_accents:
            # NFKD then dropping combining marks strips accents.
            lowercase__ : List[Any] = unicodedata.normalize('''NFKD''' ,__lowercase )
            lowercase__ : Any = ''''''.join([c for c in outputs if not unicodedata.combining(__lowercase )] )
        if self.do_lower_case:
            lowercase__ : Dict = outputs.lower()
        return outputs
    def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> List[str]:
        """Tokenize text with SentencePiece, splitting digit+comma pieces specially."""
        lowercase__ : Dict = self.preprocess_text(__lowercase )
        lowercase__ : Optional[Any] = self.sp_model.encode(__lowercase ,out_type=__lowercase )
        lowercase__ : int = []
        for piece in pieces:
            # Re-segment pieces like "2000," so the digits tokenize consistently.
            if len(__lowercase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                lowercase__ : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowercase ,'''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowercase__ : Optional[Any] = cur_pieces[1:]
                    else:
                        lowercase__ : str = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(__lowercase )
            else:
                new_pieces.append(__lowercase )
        return new_pieces
    def UpperCAmelCase ( self : str ,_snake_case : Any ) -> Optional[Any]:
        """Convert a token (str) to an id using the SentencePiece vocabulary."""
        return self.sp_model.PieceToId(__lowercase )
    def UpperCAmelCase ( self : int ,_snake_case : Tuple ) -> Optional[int]:
        """Convert an id to a token (str) using the SentencePiece vocabulary."""
        return self.sp_model.IdToPiece(__lowercase )
    def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> Optional[Any]:
        """Join a token sequence back into text, mapping SPIECE_UNDERLINE to spaces."""
        lowercase__ : Dict = ''''''.join(__lowercase ).replace(__lowercase ,''' ''' ).strip()
        return out_string
    def UpperCAmelCase ( self : int ,_snake_case : Tuple ,_snake_case : int = False ,_snake_case : Dict = None ,_snake_case : Optional[Any] = True ,**_snake_case : Union[str, Any] ,) -> str:
        """Decode token ids to text, keeping added tokens intact and optionally
        cleaning up tokenization artifacts."""
        lowercase__ : List[Any] = kwargs.pop('''use_source_tokenizer''' ,__lowercase )
        lowercase__ : Any = self.convert_ids_to_tokens(__lowercase ,skip_special_tokens=__lowercase )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowercase__ : List[str] = []
        lowercase__ : Tuple = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(__lowercase ) )
                    lowercase__ : int = []
                sub_texts.append(__lowercase )
            else:
                current_sub_text.append(__lowercase )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(__lowercase ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowercase__ : Any = ''''''.join(__lowercase )
        lowercase__ : Tuple = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowercase__ : Dict = self.clean_up_tokenization(__lowercase )
            return clean_text
        else:
            return text
    def UpperCAmelCase ( self : Tuple ,_snake_case : int ,_snake_case : List[Any] = None ) -> List[int]:
        """Build model input by appending XLNet's trailing separators:
        ``X <sep> <cls>`` (single) or ``A <sep> B <sep> <cls>`` (pair)."""
        lowercase__ : Any = [self.sep_token_id]
        lowercase__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def UpperCAmelCase ( self : Optional[int] ,_snake_case : List[Any] ,_snake_case : int = None ,_snake_case : List[Any] = False ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowercase ,token_ids_a=__lowercase ,already_has_special_tokens=__lowercase )
        if token_ids_a is not None:
            return ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1, 1]
        return ([0] * len(__lowercase )) + [1, 1]
    def UpperCAmelCase ( self : Any ,_snake_case : Tuple ,_snake_case : str = None ) -> List[int]:
        """Create segment ids: 0 for sequence A, 1 for sequence B, 2 for the CLS token."""
        lowercase__ : Union[str, Any] = [self.sep_token_id]
        lowercase__ : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def UpperCAmelCase ( self : Any ,_snake_case : Optional[int] ,_snake_case : str = None ) -> Tuple[str]:
        """Save the SentencePiece model into `save_directory`, copying the file when
        possible and otherwise serializing the in-memory model."""
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowercase__ : List[Any] = os.path.join(
            __lowercase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,__lowercase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__lowercase ,'''wb''' ) as fi:
                lowercase__ : Dict = self.sp_model.serialized_model_proto()
                fi.write(__lowercase )
        return (out_vocab_file,)
| 16 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case__( unittest.TestCase ):
    '''Integration tests pinning JukeboxTokenizer's token ids for a fixed lyrics sample.

    NOTE(review): obfuscation artifacts — both class attributes below bind the
    same name ``SCREAMING_SNAKE_CASE__`` (so only the metadata dict survives
    while the tests read ``self.metas``), and the test bodies read locals
    (``tokenizer``, ``tokens``, ``EXPECTED_OUTPUT``) that were renamed away.
    Code is left byte-identical; only comments/docstrings were added.
    '''
    SCREAMING_SNAKE_CASE__ : Tuple = JukeboxTokenizer
    SCREAMING_SNAKE_CASE__ : int = {
        """artist""": """Zac Brown Band""",
        """genres""": """Country""",
        """lyrics""": """I met a traveller from an antique land,
    Who said \"Two vast and trunkless legs of stone
    Stand in the desert. . . . Near them, on the sand,
    Half sunk a shattered visage lies, whose frown,
    And wrinkled lip, and sneer of cold command,
    Tell that its sculptor well those passions read
    Which yet survive, stamped on these lifeless things,
    The hand that mocked them, and the heart that fed;
    And on the pedestal, these words appear:
    My name is Ozymandias, King of Kings;
    Look on my Works, ye Mighty, and despair!
    Nothing beside remains. Round the decay
    Of that colossal Wreck, boundless and bare
    The lone and level sands stretch far away
    """,
    }
    @require_torch
    def lowercase_ ( self ) -> Union[str, Any]:
        """Tokenize the fixture with the 1b-lyrics tokenizer and compare against golden ids."""
        import torch
        lowerCAmelCase_ : Union[str, Any] = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        lowerCAmelCase_ : Any = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        lowerCAmelCase_ : List[str] = [
            torch.tensor([[
                0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
                7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
                4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
                4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
                3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
                4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
                4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
                7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
                7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
                6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
                3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
                2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
                3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
                2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
                4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
                4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
                7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
                3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
                4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
                4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
                3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
                4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
                3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
                3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
                4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
                3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
                7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
                4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
                4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
                4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
                4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
                4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
                4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
                2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
                7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
                4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
                7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
                3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
                4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
                4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
                4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
                3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
                4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
                2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
                7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
                7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
                7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                7_6, 7_6]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def lowercase_ ( self ) -> List[Any]:
        """Same fixture against the 5b-lyrics tokenizer (different vocab/padding ids)."""
        import torch
        lowerCAmelCase_ : Any = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        lowerCAmelCase_ : str = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        lowerCAmelCase_ : Tuple = [
            torch.tensor([[
                0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
                3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
                3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
                4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
                7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
                7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
                2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
                3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
                3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
                7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
                7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
                4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
                7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
                7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
                7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
                6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
                4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
                4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
                3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
                3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
                4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
                4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
                4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
                3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
                3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
                4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
                3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
                3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
                4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
                2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
                3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
                3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
                3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
                3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
                4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
                3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
                1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
                1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
                4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
                4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
                4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
                4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
                2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
                3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
                7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
                3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
                4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
                7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
                4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
                7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
                2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
                7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
    """Test helper that builds small FocalNet configs/inputs and checks each model head.

    NOTE(review): obfuscation artifacts — ``__init__``'s signature repeats the
    parameter name ``lowerCAmelCase__`` (a SyntaxError in Python), and the
    bodies bind results to throwaway names (``a``) while reading the original
    names (``parent``, ``batch_size``, ...).  Code left byte-identical; only
    comments/docstrings added.
    """
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=[32, 64, 128] , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=10 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2"] , lowerCAmelCase__=[1, 2] , ) -> Union[str, Any]:
        # Store every configuration knob used to build a tiny FocalNet model.
        a : List[Any] = parent
        a : Tuple = batch_size
        a : List[Any] = image_size
        a : Dict = patch_size
        a : List[str] = num_channels
        a : List[str] = embed_dim
        a : Optional[int] = hidden_sizes
        a : int = depths
        a : str = num_heads
        a : str = window_size
        a : List[Any] = mlp_ratio
        a : int = qkv_bias
        a : Dict = hidden_dropout_prob
        a : Dict = attention_probs_dropout_prob
        a : List[str] = drop_path_rate
        a : Tuple = hidden_act
        a : Optional[Any] = use_absolute_embeddings
        a : List[str] = patch_norm
        a : str = layer_norm_eps
        a : List[Any] = initializer_range
        a : Tuple = is_training
        a : List[Any] = scope
        a : List[str] = use_labels
        a : int = type_sequence_label_size
        a : Union[str, Any] = encoder_stride
        a : int = out_features
        a : Tuple = out_indices
    def __a ( self ) -> Optional[Any]:
        """Create random pixel values (and labels when enabled) plus a config."""
        a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        a : Tuple = None
        if self.use_labels:
            a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        a : int = self.get_config()
        return config, pixel_values, labels
    def __a ( self ) -> Tuple:
        """Build a FocalNetConfig from the stored test parameters."""
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
        """Check the base model's last hidden state has the expected (B, seq, dim) shape."""
        a : int = FocalNetModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        a : Union[str, Any] = model(__lowercase )
        # Sequence length shrinks 4x per downsampling stage after patching.
        a : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        a : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
        """Check backbone feature maps/channels, both with explicit and default out_features."""
        a : Tuple = FocalNetBackbone(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        a : Optional[int] = model(__lowercase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        a : int = None
        a : Dict = FocalNetBackbone(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        a : List[Any] = model(__lowercase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
        """Check masked-image-modeling reconstruction shapes, incl. greyscale input."""
        a : int = FocalNetForMaskedImageModeling(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        a : List[str] = model(__lowercase )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        a : Union[str, Any] = 1
        a : str = FocalNetForMaskedImageModeling(__lowercase )
        model.to(__lowercase )
        model.eval()
        a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        a : Dict = model(__lowercase )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
        """Check the classification head's logits shape, incl. greyscale input."""
        a : Union[str, Any] = self.type_sequence_label_size
        a : Optional[Any] = FocalNetForImageClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        a : Any = model(__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        a : int = 1
        a : List[str] = FocalNetForImageClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        a : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        a : Optional[int] = model(__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def __a ( self ) -> Dict:
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) common-test format."""
        a : List[str] = self.prepare_config_and_inputs()
        # NOTE(review): `config_and_inputs` is undefined here — the line above
        # presumably should bind it (obfuscation artifact); confirm against the
        # un-obfuscated original before relying on this helper.
        a : Tuple = config_and_inputs
        a : int = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common-suite tests for the FocalNet family (base model, classification,
    masked-image-modeling, backbone).

    NOTE(review): throughout this class the method bodies reference `__lowercase`
    and several signatures repeat `lowerCAmelCase__`; these identifiers were mangled
    by an automated renaming pass and are undefined as written — confirm against
    upstream `tests/models/focalnet/test_modeling_focalnet.py` before running.
    """
    # Model classes exercised by the common tests (backbone last — several loops
    # below deliberately iterate `all_model_classes[:-1]` to skip it).
    lowerCamelCase : List[str] =(
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    # pipeline-task -> model-class mapping used by the pipeline tests
    lowerCamelCase : Any =(
        {"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # common-test feature flags (pruned heads / resizing embeddings etc. disabled)
    lowerCamelCase : Union[str, Any] =False
    lowerCamelCase : Tuple =False
    lowerCamelCase : Optional[Any] =False
    lowerCamelCase : Tuple =False
    lowerCamelCase : str =False
    def __a ( self ) -> Tuple:
        # Build the shared model tester plus a ConfigTester for FocalNetConfig.
        a : Tuple = FocalNetModelTester(self )
        a : int = ConfigTester(self , config_class=__lowercase , embed_dim=37 , has_text_modality=__lowercase )
    def __a ( self ) -> Dict:
        # Standard config round-trip / init sanity checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def __a ( self ) -> Union[str, Any]:
        # placeholder (create_and_test_config_common_properties is a no-op here)
        return
    def __a ( self ) -> str:
        a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )
    def __a ( self ) -> Dict:
        a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__lowercase )
    def __a ( self ) -> Union[str, Any]:
        a : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__lowercase )
    def __a ( self ) -> Tuple:
        a : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowercase )
    @unittest.skip(reason="FocalNet does not use inputs_embeds" )
    def __a ( self ) -> List[Any]:
        pass
    @unittest.skip(reason="FocalNet does not use feedforward chunking" )
    def __a ( self ) -> Dict:
        pass
    def __a ( self ) -> Union[str, Any]:
        # Input embeddings exist; output embeddings are absent or a Linear head.
        a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            a : Optional[Any] = model_class(__lowercase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            a : Union[str, Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
    def __a ( self ) -> int:
        # forward() must take `pixel_values` as its first argument.
        a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            a : Optional[Any] = model_class(__lowercase )
            a : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a : List[Any] = [*signature.parameters.keys()]
            a : Optional[int] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
        # Shared helper: check hidden_states / reshaped_hidden_states shapes.
        a : Tuple = model_class(__lowercase )
        model.to(__lowercase )
        model.eval()
        with torch.no_grad():
            a : str = model(**self._prepare_for_class(__lowercase , __lowercase ) )
        a : Union[str, Any] = outputs.hidden_states
        a : List[str] = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__lowercase ) , __lowercase )
        # FocalNet has a different seq_length
        a : Dict = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        a : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        a : Any = outputs.reshaped_hidden_states
        self.assertEqual(len(__lowercase ) , __lowercase )
        a : List[str] = reshaped_hidden_states[0].shape
        a : Tuple = (
            reshaped_hidden_states[0].view(__lowercase , __lowercase , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def __a ( self ) -> Union[str, Any]:
        # hidden-states output at the default image size
        a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        a : List[Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            a : Optional[int] = True
            self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , __lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a : Dict = True
            self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , __lowercase )
    def __a ( self ) -> Dict:
        # hidden-states output when the image size is padded up to a patch multiple
        a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        a : Optional[int] = 3
        a : int = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        a : Any = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        a : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        a : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            a : Union[str, Any] = True
            self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a : Union[str, Any] = True
            self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , (padded_height, padded_width) )
    @slow
    def __a ( self ) -> List[str]:
        # smoke-test loading the first published checkpoint
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : List[Any] = FocalNetModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    def __a ( self ) -> Dict:
        # with zero-init config, non-embedding params must be exactly 0.0 or 1.0
        a : str = self.model_tester.prepare_config_and_inputs_for_common()
        a : int = _config_zero_init(__lowercase )
        for model_class in self.all_model_classes:
            a : Tuple = model_class(config=__lowercase )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration test: run the pretrained focalnet-tiny classifier on a COCO
    sample image and pin the first three logits and the argmax class.

    NOTE(review): `__lowercase` below is an artifact of an automated renaming pass
    (originally `torch_device` and the local variables) and is undefined as written.
    """
    @cached_property
    def __a ( self ) -> List[Any]:
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
    @slow
    def __a ( self ) -> Any:
        a : List[Any] = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__lowercase )
        a : List[Any] = self.default_image_processor
        a : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        a : Dict = image_processor(images=__lowercase , return_tensors="pt" ).to(__lowercase )
        # forward pass
        with torch.no_grad():
            a : List[str] = model(**__lowercase )
        # verify the logits
        a : List[Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __lowercase )
        a : Union[str, Any] = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
        # NOTE(review): assertTrue(x, y) treats y as a failure message and passes for
        # any truthy x — this line was almost certainly meant to be assertEqual.
        self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __UpperCamelCase ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Tuple =(FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : Union[str, Any] =FocalNetConfig
lowerCamelCase : List[Any] =False
def __a ( self ) -> Optional[Any]:
a : List[Any] = FocalNetModelTester(self )
| 105 |
from __future__ import annotations
import requests
def lowerCAmelCase ( story_id )-> dict:
    """Fetch a single Hacker News item by id and return the decoded JSON dict."""
    # NOTE(review): the original interpolated an undefined `story_id` into the URL
    # (the parameter had been renamed away), guaranteeing a NameError; restored.
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url).json()
def lowerCAmelCase ( max_stories = 10 )-> list[dict]:
    """Return the JSON dicts of the current top Hacker News stories (at most
    *max_stories* of them)."""
    # NOTE(review): `max_stories` was undefined in the original (parameter renamed
    # away and then shadowed by the URL); restored. `get_hackernews_story` is the
    # item-fetching helper above — in this mangled file all three helpers share one
    # name, so the module's original function names must be restored for it to run.
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def lowerCAmelCase ( max_stories = 10 )-> str:
    """Render the top stories as a markdown bullet list of ``[title](url)`` links."""
    # NOTE(review): `.format(**...)` was applied to the whole stories list instead of
    # each story dict; fixed to format one story at a time. `hackernews_top_stories`
    # is the helper above (renamed away by the same pass).
    stories = hackernews_top_stories(max_stories)
    return "\n".join('''* [{title}]({url})'''.format(**story) for story in stories)
if __name__ == "__main__":
    # NOTE(review): dropped the "| 262 | 0 |" residue fused onto this line (it made
    # the module unparseable). `hackernews_top_stories_as_markdown` is the renderer
    # above (renamed away by the mangling pass).
    print(hackernews_top_stories_as_markdown())
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase = 600851475143 ) -> int:
    """Return the largest prime factor of *_UpperCAmelCase* (Project Euler 3).

    Raises:
        TypeError: if the argument cannot be cast to ``int``.
        ValueError: if the value is not >= 1.
    """
    try:
        n = int(_UpperCAmelCase)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    # NOTE(review): the renaming pass collapsed `i`/`ans`/`n` into one placeholder,
    # leaving `i` undefined at its use sites; distinct names restored.
    i = 2  # trial divisor
    ans = 0  # largest prime factor found so far
    if n == 2:
        return 2
    while n > 2:
        # advance to the next divisor of n (necessarily prime)
        while n % i != 0:
            i += 1
        ans = i
        # strip out every power of this prime
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    # NOTE(review): the original called an undefined `solution()` (the function
    # above was renamed to `lowercase__`), and a "| 255 |" residue line followed;
    # both fixed.
    print(f"""{lowercase__() = }""")
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Shared SentencePiece fixture used by the slow-tokenizer tests below.
_UpperCAmelCase : List[str] =get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
# Language-code token ids in the MBart fairseq vocabulary.
# NOTE(review): both ids had been renamed to `_UpperCAmelCase`, leaving EN_CODE and
# RO_CODE undefined at their use sites in the test classes below; restored.
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    """Slow/fast MBartTokenizer unit tests driven by the shared SentencePiece fixture.

    NOTE(review): the `__lowercase` placeholders and bare names (`tokenizer`,
    `tokenizer_r`, ...) below are artifacts of an automated renaming pass and are
    undefined as written — confirm against the upstream
    `tests/models/mbart/test_tokenization_mbart.py` before running.
    """
    SCREAMING_SNAKE_CASE__ : Tuple = MBartTokenizer
    SCREAMING_SNAKE_CASE__ : Dict = MBartTokenizerFast
    SCREAMING_SNAKE_CASE__ : Tuple = True
    SCREAMING_SNAKE_CASE__ : List[str] = True
    def lowercase_ ( self ) -> Dict:
        # Build a slow tokenizer from the fixture and save it for the common tests.
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase_ : str = MBartTokenizer(__lowercase , keep_accents=__lowercase )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowercase_ ( self ) -> List[Any]:
        # Pin tokenization, ids and round-trip detokenization on two sample strings.
        lowerCAmelCase_ : Optional[int] = MBartTokenizer(__lowercase , keep_accents=__lowercase )
        lowerCAmelCase_ : Dict = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        lowerCAmelCase_ : Dict = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(
            __lowercase , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowercase )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
    def lowercase_ ( self ) -> Dict:
        # Slow and fast tokenizers must save/load interchangeably, in all three
        # save formats (default, legacy_format=True, legacy_format=False).
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCAmelCase_ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowerCAmelCase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                lowerCAmelCase_ : int = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                lowerCAmelCase_ : Tuple = tempfile.mkdtemp()
                lowerCAmelCase_ : Union[str, Any] = tokenizer_r.save_pretrained(__lowercase )
                lowerCAmelCase_ : Dict = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCAmelCase_ : str = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Tuple = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : Dict = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=True
                lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : int = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                lowerCAmelCase_ : Tuple = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Optional[int] = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=False
                lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : List[str] = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                lowerCAmelCase_ : Optional[int] = tokenizer_p.save_pretrained(__lowercase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCAmelCase_ : Dict = tokenizer_r.from_pretrained(__lowercase )
                lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__( unittest.TestCase ):
    """Integration tests for the pretrained facebook/mbart-large-en-ro tokenizer:
    language-code ids, en->ro batch encoding, truncation and translation inputs.

    NOTE(review): `__lowercase` and bare names like `src_text`, `batch`, `targets`
    are artifacts of an automated renaming pass and are undefined as written —
    confirm against the upstream MBart tokenization tests before running.
    """
    SCREAMING_SNAKE_CASE__ : int = """facebook/mbart-large-en-ro"""
    # English source sentences ...
    SCREAMING_SNAKE_CASE__ : int = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    # ... and their Romanian reference translations.
    SCREAMING_SNAKE_CASE__ : Optional[int] = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    # expected ids for src_text[0], terminated by EOS (2) + EN_CODE
    SCREAMING_SNAKE_CASE__ : str = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
    @classmethod
    def lowercase_ ( cls ) -> Optional[int]:
        # Download the pretrained tokenizer once for the whole class.
        lowerCAmelCase_ : MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        lowerCAmelCase_ : Optional[Any] = 1
        return cls
    def lowercase_ ( self ) -> Optional[Any]:
        # language-code token ids in the fairseq vocab
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
    def lowercase_ ( self ) -> Tuple:
        lowerCAmelCase_ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __lowercase )
    def lowercase_ ( self ) -> Any:
        # decoding with skip_special_tokens must drop the language code and EOS
        self.assertIn(__lowercase , self.tokenizer.all_special_ids )
        lowerCAmelCase_ : Union[str, Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        lowerCAmelCase_ : Tuple = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
        lowerCAmelCase_ : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertNotIn(self.tokenizer.eos_token , __lowercase )
    def lowercase_ ( self ) -> Any:
        # truncation keeps EOS + language code as the final two tokens
        lowerCAmelCase_ : Union[str, Any] = ['''this is gunna be a long sentence ''' * 2_0]
        assert isinstance(src_text[0] , __lowercase )
        lowerCAmelCase_ : str = 1_0
        lowerCAmelCase_ : Tuple = self.tokenizer(__lowercase , max_length=__lowercase , truncation=__lowercase ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , __lowercase )
        self.assertEqual(len(__lowercase ) , __lowercase )
    def lowercase_ ( self ) -> int:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
    def lowercase_ ( self ) -> Dict:
        # the fairseq id mapping must survive a save/load round-trip
        lowerCAmelCase_ : Any = tempfile.mkdtemp()
        lowerCAmelCase_ : int = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__lowercase )
        lowerCAmelCase_ : Optional[Any] = MBartTokenizer.from_pretrained(__lowercase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowercase )
    @require_torch
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' )
        lowerCAmelCase_ : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : str = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        lowerCAmelCase_ : int = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__lowercase , __lowercase )
        self.assertEqual((2, 1_4) , batch.input_ids.shape )
        self.assertEqual((2, 1_4) , batch.attention_mask.shape )
        lowerCAmelCase_ : str = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __lowercase )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def lowercase_ ( self ) -> Optional[int]:
        # source and target can be truncated to different max lengths
        lowerCAmelCase_ : Optional[Any] = self.tokenizer(self.src_text , padding=__lowercase , truncation=__lowercase , max_length=3 , return_tensors='''pt''' )
        lowerCAmelCase_ : Any = self.tokenizer(
            text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=1_0 , return_tensors='''pt''' )
        lowerCAmelCase_ : int = targets['''input_ids''']
        lowerCAmelCase_ : Optional[Any] = shift_tokens_right(__lowercase , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
    @require_torch
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Any = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(__lowercase ) , {
                # A, test, EOS, en_XX
                '''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 2_5_0_0_0_1,
            } , )
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase__ ( cp):
    """Return True if code point *cp* falls in a CJK ideograph Unicode block."""
    # NOTE(review): the parameter had been renamed to `_a` while the body still used
    # `cp` (a NameError); restored.
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
def lowerCamelCase__ ( word):
    """Return 1 if every character of *word* is a CJK ideograph, else 0."""
    # word like '180' or '身高' or '神'
    # NOTE(review): the parameter had been renamed to `_a` while the body used
    # `word`; restored. `_is_chinese_char` is the code-point helper defined above
    # (renamed away by the same pass).
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def lowerCamelCase__ ( tokens):
    """Collect the multi-character, all-CJK tokens from *tokens* as a list."""
    # NOTE(review): parameter restored to `tokens`; `is_chinese` is the helper above
    # (renamed away by the mangling pass).
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def lowerCamelCase__ ( bert_tokens , chinese_word_set):
    """Prefix '##' onto the non-initial pieces of every whole Chinese word found in
    *bert_tokens*, so whole-word masking can treat the word as one unit.

    NOTE(review): the original signature used the same name for both parameters (a
    SyntaxError) and the ``start, end`` unpacking had been collapsed; restored.
    NOTE: aliases (and mutates) the input token list, matching the original code.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_tokens)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest possible word first
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def lowerCamelCase__ ( lines , ltp_tokenizer , bert_tokenizer):
    """For each input line, return the positions of BERT sub-tokens that are
    non-initial pieces of a whole Chinese word (the '##'-marked CJK pieces).

    NOTE(review): the three parameters all shared one name in the mangled source (a
    SyntaxError); names restored from the call site in main(). The helpers
    `get_chinese_word`, `add_sub_symbol` and `_is_chinese_char` are the functions
    above (renamed away by the same pass).
    """
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def lowerCamelCase__ ( args):
    """Read the input corpus, compute whole-word-masking reference positions, and
    write them as JSON lines to ``args.save_path``."""
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    # NOTE(review): the parameter had been renamed to `_a` while the body referenced
    # `args`; restored. `prepare_ref` is the function above (renamed by the same pass).
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + '''\n''' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # NOTE(review): `parser` and `args` had been renamed to a throwaway `a_`,
    # leaving every add_argument/parse_args call on an undefined name; restored.
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
    parser.add_argument(
        '--file_name',
        required=False,
        type=str,
        default='./resources/chinese-demo.txt',
        help='file need process, same as training data in lm',
    )
    parser.add_argument(
        '--ltp',
        required=False,
        type=str,
        default='./resources/ltp',
        help='resources for LTP tokenizer, usually a path',
    )
    parser.add_argument(
        '--bert',
        required=False,
        type=str,
        default='./resources/robert',
        help='resources for Bert tokenizer',
    )
    parser.add_argument(
        '--save_path',
        required=False,
        type=str,
        default='./resources/ref.txt',
        help='path to save res',
    )
    args = parser.parse_args()
    main(args)
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase ( src_path , map_location = "cpu" , save_path = None )-> None:
    """Load a PyTorch state dict from *src_path*, cast every tensor to fp16, and
    save it to *save_path* (overwriting *src_path* when no save path is given).

    Raises:
        TypeError: if the loaded object is not a plain tensor state dict.

    NOTE(review): the original signature repeated one parameter name three times (a
    SyntaxError), and the halved tensor was assigned to a throwaway local instead of
    being written back into the state dict — the saved file kept fp32 weights.
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
    # NOTE(review): the original passed an undefined `convert` — the conversion
    # function above was renamed to `lowerCAmelCase` by the mangling pass.
    fire.Fire(lowerCAmelCase)
"""simple docstring"""
from math import ceil
def lowercase_ ( _snake_case = 1_001 ):
    """Return the sum of the numbers on both diagonals of an n-by-n number spiral
    (Project Euler 28). *_snake_case* is the (odd) side length."""
    # NOTE(review): the loop variables (`total`, `odd`, `even`) had all been renamed
    # to one placeholder and the parameter was referenced as an undefined `n`;
    # restored from the arithmetic.
    n = _snake_case
    total = 1  # the centre cell
    for i in range(1, int(ceil(n / 2.0))):
        # ring i contributes four corners: (2i+1)^2, and the other three are each
        # 2i less than the previous, i.e. 4*(2i+1)^2 - 6*(2i).
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    # NOTE(review): the original called an undefined `solution` (the function above
    # is named `lowercase_`) and bound argv to a throwaway name before using `n`;
    # fixed. A trailing "| 25 |" residue line was dropped.
    if len(sys.argv) == 1:
        print(lowercase_())
    else:
        try:
            n = int(sys.argv[1])
            print(lowercase_(n))
        except ValueError:
            print('Invalid entry - please enter a number')
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

# Expose the UnCLIP pipelines only when a compatible torch + transformers (>= 4.25.0)
# stack is installed; otherwise fall back to the dummy placeholder objects so that
# importing this subpackage never hard-fails.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (UpperCAmelCase__ ):
'''simple docstring'''
_snake_case : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_5_5 , _UpperCamelCase = True , _UpperCamelCase = IMAGENET_DEFAULT_MEAN , _UpperCamelCase = IMAGENET_DEFAULT_STD , **_UpperCamelCase , ) -> None:
super().__init__(**__lowercase )
UpperCAmelCase_ : List[str] = size if size is not None else {'''shortest_edge''': 2_2_4}
UpperCAmelCase_ : Union[str, Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCAmelCase_ : List[Any] = get_size_dict(__lowercase , param_name='crop_size' )
UpperCAmelCase_ : List[str] = do_resize
UpperCAmelCase_ : List[Any] = size
UpperCAmelCase_ : Tuple = resample
UpperCAmelCase_ : Any = do_center_crop
UpperCAmelCase_ : List[Any] = crop_size
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : str = do_normalize
UpperCAmelCase_ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = get_size_dict(__lowercase , default_to_square=__lowercase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase_ : Any = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
UpperCAmelCase_ : str = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
UpperCAmelCase_ : int = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
__lowercase , size=(size_dict['height'], size_dict['width']) , resample=__lowercase , data_format=__lowercase , **__lowercase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
UpperCAmelCase_ : Tuple = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(__lowercase , size=(size['height'], size['width']) , data_format=__lowercase , **__lowercase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ) -> BatchFeature:
UpperCAmelCase_ : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : Union[str, Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : int = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : List[Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Union[str, Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Optional[Any] = size if size is not None else self.size
UpperCAmelCase_ : Any = get_size_dict(__lowercase , default_to_square=__lowercase )
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Dict = get_size_dict(__lowercase , param_name='crop_size' )
UpperCAmelCase_ : Tuple = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Tuple = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
UpperCAmelCase_ : Dict = [self.resize(__lowercase , __lowercase , __lowercase ) for image in images]
if do_center_crop:
UpperCAmelCase_ : int = [self.center_crop(__lowercase , __lowercase ) for image in images]
if do_rescale:
UpperCAmelCase_ : Dict = [self.rescale(__lowercase , __lowercase ) for image in images]
if do_normalize:
UpperCAmelCase_ : List[str] = [self.normalize(__lowercase , __lowercase , __lowercase ) for image in images]
UpperCAmelCase_ : int = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
UpperCAmelCase_ : Union[str, Any] = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
| 29 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : str ={
    """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class snake_case__( PretrainedConfig ):
    """Configuration for a ViT-MAE model.

    Stores encoder sizes (hidden/intermediate, layers, heads), patch/image geometry,
    and the MAE decoder hyper-parameters plus ``mask_ratio``/``norm_pix_loss``.
    The original __init__ repeated ``__lowercase`` for every parameter (a SyntaxError)
    and never assigned the values to ``self``; both are restored here.
    """

    SCREAMING_SNAKE_CASE__ : Optional[Any] = """vit_mae"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Register the quantization-related command-line options on ``parser``."""
    group = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=int , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=int , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=str , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=str , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=str , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=None , type=float , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=float , help='''clip gelu output maximum value to N''' )
    group.add_argument(
        '''--recalibrate-weights''' , action='''store_true''' , help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ) , )
def set_default_quantizers(args):
    """Install default input/weight QuantDescriptors on QuantLinear from ``args``.

    Raises ValueError for an unknown ``--calibrator`` or a missing ``--percentile``.
    """
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        # MSE also uses a histogram-based calibrator.
        calib_method = '''histogram'''
    else:
        raise ValueError(f"""Invalid calibrator {args.calibrator}""" )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    # Per-channel (axis 0) weight scaling unless --quant-per-tensor was given.
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model(model, args, calib=False, eval=False):
    """Apply the quantizer enable/disable/fusion options in ``args`` to ``model``.

    When ``calib`` is True the configuration steps are skipped (the model is being
    calibrated, not quantized). ``eval`` is accepted for interface compatibility.
    """
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f"""using quantization package {pytorch_quantization.__file__}""" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration(model):
    """Switch every ``*_quantizer`` module to calibration mode (quant disabled)."""
    logger.info('''Enabling Calibration''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                # No calibrator attached: disable the quantizer entirely.
                module.disable()
            logger.info(f"""{name:80}: {module}""" )


def finish_calibration(model, args):
    """Load calibrated amax values and re-enable quantization on ``model``."""
    logger.info('''Loading calibrated amax''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    # Histogram-based calibrators need the percentile argument.
                    module.load_calib_amax('''percentile''' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def fuse_qkv(model, args):
    """Make Q/K/V share one scale by setting their amax to the max of the three."""

    def fusea(qq, qk, qv):
        # Helper: fuse the amax of three quantizers in place.
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f"""          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )

    for name, mod in model.named_modules():
        if name.endswith('''.attention.self''' ):
            logger.info(f"""FUSE_QKV: {name:{name_width}}""" )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model, maxval):
    """Clamp the input-quantizer amax of every non-attention output.dense to ``maxval``."""
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def expand_amax(model):
    """Broadcast per-axis weight amax buffers to the full output-channel dimension."""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )


def recalibrate_weights(model):
    """Recompute each weight quantizer's amax from the current weights."""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            # NOTE(review): upstream checks `mod.weight_quantizer` (no underscore) here,
            # which likely always fails hasattr — kept as-is to preserve behavior.
            if not hasattr(mod.weight_quantizer , '''_amax''' ):
                print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Log one line per weighted module showing its activation and weight quantizers.

    ``ignore`` may be a type, a name substring, or a list of either; matching
    modules are skipped. ``name_width`` is recomputed from the longest module name.
    """
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , '''weight''' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '''_input_quantizer''' , None )
        weight_q = getattr(mod , '''_weight_quantizer''' , None )
        if not hasattr(mod , '''weight''' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f"""Act:{input_q.extra_repr()}"""
        wgt_str = f"""Wgt:{weight_q.extra_repr()}"""
        s = f"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(s ) <= line_width:
            logger.info(s )
        else:
            # Too long for one line: split activation and weight info.
            logger.info(f"""{name:{name_width}} {act_str}""" )
            logger.info(f"""{' ':{name_width}} {wgt_str}""" )


def print_quant_summary(model):
    """Print every TensorQuantizer in ``model`` and a final count."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f"""{name:80} {mod}""" )
            count += 1
    print(f"""{count} TensorQuantizers found in model""" )
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute ``k`` to ``v`` on ``mod.<quantizer>``; warn if it is missing."""
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f"""{name} has no {quantizer}""" )


def set_quantizers(name, mod, which="both", **kwargs):
    """Apply each kwarg to ``mod``'s input and/or weight quantizer.

    NOTE(review): like the upstream version, only the last kwargs item (k, v)
    actually reaches set_quantizer — kept as-is to preserve behavior.
    """
    s = f"""Warning: changing {which} quantizers of {name:{qname_width}}"""
    for k, v in kwargs.items():
        s += f""" {k}={v}"""
    if which in ["input", "both"]:
        set_quantizer(name , mod , '''_input_quantizer''' , k , v )
    if which in ["weight", "both"]:
        set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
    logger.info(s )


def set_quantizer_by_name(model, names, **kwargs):
    """Apply kwargs to every quantizer whose module name matches a regex in ``names``."""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    s = f"""Warning: changing {name:{name_width}}"""
                    for k, v in kwargs.items():
                        s += f""" {k}={v}"""
                    setattr(mod , k , v )
                    logger.info(s )
| 24 |
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of all almost-equilateral Heronian triangles.

    Perimeters are generated via the recurrence on ``value``/``prev_value``;
    the generated perimeter alternates between 2*value + 2 and 2*value - 2.
    Returns the sum of all generated perimeters not exceeding ``max_perimeter``.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_ ( resistance: float, reactance: float, impedance: float ) -> dict[str, float]:
    """Solve the electrical-impedance triangle |Z|^2 = R^2 + X^2 for the zero argument.

    Exactly one of the three arguments must be 0; the returned dict maps that
    quantity's name to its computed value. Raises ValueError otherwise.
    """
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("Exactly one argument must be 0" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 1 |
import inspect
import unittest
class snake_case__( unittest.TestCase ):
    """Checks that diffusers imports and that all dummy-object backends are in the deps table."""

    def test_diffusers_import(self) -> None:
        # Importing must not raise; fail the test otherwise.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self) -> None:
        import diffusers
        from diffusers.dependency_versions_table import deps

        # Every dummy class declares the backends it stands in for; each backend
        # name (after pip-name normalization) must appear in the deps table.
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f"""{backend} is not in the deps table!"""
'''simple docstring'''
def partition(m: int) -> int:
    """Count the partitions of ``m`` via dynamic programming.

    memo[n][k] counts partitions of n using parts bounded by k;
    the answer is memo[m][m - 1].
    """
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1  # one way to partition any n with the smallest bound
    for n in range(m + 1 ):
        for k in range(1, m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys

    # Read m from argv if given, otherwise prompt interactively.
    if len(sys.argv) == 1:
        try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_UpperCAmelCase : Any =logging.get_logger(__name__)


class snake_case__( ChineseCLIPImageProcessor ):
    """Deprecated alias for ChineseCLIPImageProcessor; warns on construction."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
def a__ ( num: int ) -> bool:
    """Return True if ``num`` is a palindromic non-negative integer.

    Negative numbers are never palindromes. The reversal is built digit by
    digit with integer arithmetic (no string conversion).
    """
    if num < 0:
        return False
    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 108 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover of ``graph`` with the matching heuristic.

    Repeatedly picks an arbitrary remaining edge, adds both endpoints to the
    cover, and discards every edge touching either endpoint.
    """
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of directed (from_node, to_node) pairs in ``graph``."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
import math
class SelfOrganizingMap:
    """Minimal two-cluster self-organizing map (Kohonen network)."""

    def get_winner(self, weights: list, sample: list) -> int:
        """Return the index (0 or 1) of the weight vector closest to ``sample``.

        Fix: the original returned inside the loop after the first component,
        so only sample[0] influenced the decision; the full squared distances
        are now accumulated before comparing.
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list, sample: list, j: int, alpha: float) -> list:
        """Move winner ``j``'s weights toward ``sample`` by learning rate ``alpha``.

        NOTE(review): iterates over range(len(weights)) (number of clusters), as in
        the upstream version, so only the first len(weights) components are updated.
        """
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    """Train the SOM on four binary samples and classify a test sample."""
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""")
    print(F"""Weights that have been trained : {weights}""")


# running the main() function
if __name__ == "__main__":
    main()
| 330 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division up to sqrt(number))."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Return the primes from 2 to ``n`` via (a quadratic) sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 to ``n`` inclusive, testing each with is_prime."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factors of ``number`` (with multiplicity), ascending.

    0 and 1 (and primes) are returned as a single-element list of themselves.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two primes whose sum is the even ``number`` (> 2), per Goldbach."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative ints (Euclid)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple of two positive ints via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared factor: take the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the ``n``-th prime, with get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return the primes strictly between the two given primes (exclusive)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCAmelCase ( n )-> list:
    """
    Return all positive divisors of ``n`` in ascending order (1 and n included).

    The original body appended to an unbound name ``ans``; it is restored here
    and written as a comprehension.
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = [divisor for divisor in range(1, n + 1) if n % divisor == 0]
    # precondition: smallest divisor is 1 and largest is n itself
    assert ans[0] == 1 and ans[-1] == n, "Error in function getDivisiors(...)"
    return ans
def lowerCAmelCase ( number )-> bool:
    """
    Return True iff ``number`` is a perfect number, i.e. equals the sum of its
    proper divisors (all divisors except the number itself).

    Relies on the module-level helper ``get_divisors``; the original body read
    the unbound name ``divisors``, restored here.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition on the helper's contract
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[-1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def lowerCAmelCase ( numerator , denominator )-> tuple:
    """
    Reduce ``numerator/denominator`` to lowest terms.

    Returns the reduced ``(numerator, denominator)`` pair; the sign layout of
    the inputs is preserved.  Uses the standard-library ``math.gcd`` (imported
    locally), which is equivalent to the module's hand-rolled helper for the
    non-negative arguments passed here.
    """
    from math import gcd

    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( n )-> int:
    """
    Return n! (the factorial of ``n``); 0! == 1.

    The original body multiplied into an unbound name ``ans``, restored here.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be returned
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def lowerCAmelCase ( n )-> int:
    """
    Return the n-th value of this module's Fibonacci sequence:
    fib(0) == fib(1) == 1, fib(2) == 2, fib(3) == 3, fib(4) == 5, ...

    Iterative; the original body read the unbound names ``ans``/``fib1``/``tmp``
    which are restored here.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
"""simple docstring"""
from __future__ import annotations
import math
def __UpperCAmelCase ( u , p ) -> float:
    """
    Return the falling product u * (u - 1) * ... * (u - p + 1).

    This is the u-coefficient used by Newton's forward-interpolation formula.
    The original signature declared the same parameter name twice (a
    SyntaxError) while the body read ``u``/``temp``; both are restored here.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def __UpperCAmelCase ( ) -> None:
    """
    Interactively read sample points and evaluate Newton's forward
    interpolation at a user-supplied value, printing the result.

    The original body bound every result to a placeholder name while reading
    ``n``/``y``/``x``/``value``/``u``/``summ``; names are restored here.
    """
    n = int(input('''enter the numbers of values: ''' ) )
    y: list[list[float]] = []
    for _ in range(n):
        y.append([] )
    # initialise an n x n difference table with zeros
    for i in range(n):
        for j in range(n):
            y[i].append(j )
            y[i][j] = 0
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int, input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        # NOTE(review): `ucal` is expected to be the u-coefficient helper
        # defined alongside this function — confirm the symbol name in the
        # full module.
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"""the value at {value} is {summ}""" )


if __name__ == "__main__":
    __UpperCAmelCase()
| 16 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select the tensor framework used by the tokenizer tests below: prefer
# PyTorch, then TensorFlow, falling back to JAX.
# NOTE(review): the chosen value is bound to `_UpperCAmelCase`, but the test
# class later reads a module-level name `FRAMEWORK` — confirm these were meant
# to be the same symbol.
if is_torch_available():
    _UpperCAmelCase : Union[str, Any] ="""pt"""
elif is_tf_available():
    _UpperCAmelCase : List[Any] ="""tf"""
else:
    _UpperCAmelCase : Optional[int] ="""jax"""
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    """Tokenizer test-suite for the byte-level Perceiver tokenizer.

    NOTE(review): many bodies below bind results to the placeholder name
    ``lowerCAmelCase_`` while subsequent statements read different identifiers
    (``tokenizer``, ``toks``, ``encoded`` ...); as written those statements
    would raise NameError at runtime — confirm the local names against the
    upstream test file.  The two class attributes also share one name, so the
    second assignment overwrites the first (presumably ``tokenizer_class`` and
    ``test_rust_tokenizer`` upstream).
    """
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : Optional[Any] = False
    # setUp: build a fresh tokenizer and serialize it into the per-test tmp dir.
    def lowercase_ ( self ) -> Optional[int]:
        super().setUp()
        lowerCAmelCase_ : str = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    # Canonical pretrained checkpoint used by the round-trip tests below.
    @cached_property
    def lowercase_ ( self ) -> Any:
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
    # Reload a tokenizer from the tmp dir, forwarding keyword overrides.
    def lowercase_ ( self , **__lowercase ) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
    # Build a clean (input_text, input_ids) pair for the common tests.
    def lowercase_ ( self , __lowercase , __lowercase=False , __lowercase=2_0 , __lowercase=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        lowerCAmelCase_ : Optional[Any] = []
        for i in range(len(__lowercase ) ):
            try:
                lowerCAmelCase_ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        lowerCAmelCase_ : List[str] = list(filter(lambda __lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowercase ) )
        lowerCAmelCase_ : Optional[int] = list(filter(lambda __lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowercase ) , __lowercase ) )
        if max_length is not None and len(__lowercase ) > max_length:
            lowerCAmelCase_ : Union[str, Any] = toks[:max_length]
        if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
            while len(__lowercase ) < min_length:
                lowerCAmelCase_ : Union[str, Any] = toks + toks
        # toks_str = [t[1] for t in toks]
        lowerCAmelCase_ : List[str] = [t[0] for t in toks]
        # Ensure consistency
        lowerCAmelCase_ : int = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
        if " " not in output_txt and len(__lowercase ) > 1:
            lowerCAmelCase_ : Optional[int] = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
            )
        if with_prefix_space:
            lowerCAmelCase_ : Any = ''' ''' + output_txt
        lowerCAmelCase_ : List[str] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        return output_txt, output_ids
    # Encode/decode round-trips, including multibyte UTF-8 characters.
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : List[str] = self.perceiver_tokenizer
        lowerCAmelCase_ : Any = '''Unicode €.'''
        lowerCAmelCase_ : Dict = tokenizer(__lowercase )
        lowerCAmelCase_ : Any = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded['''input_ids'''] , __lowercase )
        # decoding
        lowerCAmelCase_ : str = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , '''[CLS]Unicode €.[SEP]''' )
        lowerCAmelCase_ : Optional[int] = tokenizer('''e è é ê ë''' )
        lowerCAmelCase_ : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded['''input_ids'''] , __lowercase )
        # decoding
        lowerCAmelCase_ : int = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , '''[CLS]e è é ê ë[SEP]''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
    # Batched encoding returns framework tensors of the expected shape.
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Any = self.perceiver_tokenizer
        lowerCAmelCase_ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        lowerCAmelCase_ : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        lowerCAmelCase_ : Optional[int] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
        self.assertIsInstance(__lowercase , __lowercase )
        if FRAMEWORK != "jax":
            lowerCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
        else:
            lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(__lowercase , __lowercase )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )
    # Encoder-only model: no decoder_* keys should be produced.
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : int = self.perceiver_tokenizer
        lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , __lowercase )
        self.assertIn('''attention_mask''' , __lowercase )
        self.assertNotIn('''decoder_input_ids''' , __lowercase )
        self.assertNotIn('''decoder_attention_mask''' , __lowercase )
    # text_target encoding honours max_length padding/truncation.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
        lowerCAmelCase_ : int = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        lowerCAmelCase_ : List[str] = tokenizer(
            text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
        self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
    # Save/reload round-trips preserve vocab, added tokens and model_max_length.
    def lowercase_ ( self ) -> Optional[Any]:
        # safety check on max_len default value so we are sure the test works
        lowerCAmelCase_ : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
                lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
                lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                tokenizer.save_pretrained(__lowercase )
                lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
                lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                self.assertListEqual(__lowercase , __lowercase )
                shutil.rmtree(__lowercase )
        lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
                lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                tokenizer.save_pretrained(__lowercase )
                lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
                lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                self.assertListEqual(__lowercase , __lowercase )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(__lowercase )
    # additional_special_tokens can be edited on disk and via from_pretrained.
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : List[Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__lowercase )
                with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCAmelCase_ : Tuple = json.load(__lowercase )
                with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCAmelCase_ : Any = json.load(__lowercase )
                lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
                lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                lowerCAmelCase_ : Any = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__lowercase , __lowercase )
                with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__lowercase , __lowercase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
                    __lowercase , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
                lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
                    __lowercase , additional_special_tokens=__lowercase , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
    # Byte 178 alone is not valid UTF-8 and must decode to the replacement char.
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : Any = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
    # The following common tests do not apply to the byte-level Perceiver
    # tokenizer and are intentionally disabled.
    def lowercase_ ( self ) -> Tuple:
        pass
    def lowercase_ ( self ) -> Any:
        pass
    def lowercase_ ( self ) -> Tuple:
        pass
    def lowercase_ ( self ) -> List[str]:
        pass
    def lowercase_ ( self ) -> Dict:
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase )
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( bit_count: int ) ->list:
    '''
    Return the Gray-code sequence for ``bit_count`` bits as a list of ints.

    Delegates the bit-string construction to the module-level helper
    ``gray_code_sequence_string`` and converts each entry from base 2.
    Raises ValueError for negative inputs.  The original body read the
    parameter under two different names; a single name is restored here.
    '''
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def _SCREAMING_SNAKE_CASE ( bit_count: int ) ->list:
    '''
    Return the Gray-code sequence for ``bit_count`` bits as binary strings.

    Built recursively: prefix '0' to the (n-1)-bit sequence, then '1' to the
    same sequence reversed.  The original recursion invoked the undefined name
    ``gray_code_sequence_string``; it now calls this function itself.
    '''
    # base cases: one value for 0 bits, two values for 1 bit
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = _SCREAMING_SNAKE_CASE(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL.Image when the vision extra is installed; otherwise install a
# no-op stub so that module-level references still resolve.
if is_vision_available():
    from PIL import Image
else:
    class snake_case__:
        """No-op stand-in used when PIL is unavailable; every call is ignored."""
        @staticmethod
        def lowercase_ ( *__lowercase , **__lowercase ) -> Union[str, Any]:
            pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case__( unittest.TestCase ):
    """Pipeline tests for the ``zero-shot-object-detection`` task.

    Covers a tiny random checkpoint (fast tests) and the default public
    checkpoint (slow tests), including batching, ``threshold`` and ``top_k``.
    """
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    # Build the pipeline under test plus one example input.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
        lowerCAmelCase_ : Union[str, Any] = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCAmelCase_ : str = [
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
            }
        ]
        return object_detector, examples
    # Every returned detection must expose score/label/box with the right types.
    def lowercase_ ( self , __lowercase , __lowercase ) -> str:
        lowerCAmelCase_ : Tuple = object_detector(examples[0] , threshold=0.0 )
        lowerCAmelCase_ : Dict = len(__lowercase )
        self.assertGreater(__lowercase , 0 )
        self.assertEqual(
            __lowercase , [
                {
                    '''score''': ANY(__lowercase ),
                    '''label''': ANY(__lowercase ),
                    '''box''': {'''xmin''': ANY(__lowercase ), '''ymin''': ANY(__lowercase ), '''xmax''': ANY(__lowercase ), '''ymax''': ANY(__lowercase )},
                }
                for i in range(__lowercase )
            ] , )
    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def lowercase_ ( self ) -> List[str]:
        pass
    # Tiny checkpoint: single image and batched list must give identical boxes.
    @require_torch
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : Union[str, Any] = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCAmelCase_ : Union[str, Any] = object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                {'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                {'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
                {'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
            ] , )
        lowerCAmelCase_ : Union[str, Any] = object_detector(
            [
                {
                    '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                }
            ] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                [
                    {'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                    {'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
                    {'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                ]
            ] , )
    # Default public checkpoint: URL input, single and batched.
    @require_torch
    @slow
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : Any = pipeline('''zero-shot-object-detection''' )
        lowerCAmelCase_ : Dict = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
            ] , )
        lowerCAmelCase_ : Tuple = object_detector(
            [
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
            ] , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                [
                    {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                    {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                    {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                    {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                    {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
                ],
                [
                    {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                    {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                    {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                    {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                    {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
                ],
            ] , )
    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def lowercase_ ( self ) -> List[str]:
        pass
    # threshold filters out low-score detections.
    @require_torch
    @slow
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : Any = 0.2
        lowerCAmelCase_ : List[Any] = pipeline('''zero-shot-object-detection''' )
        lowerCAmelCase_ : Optional[Any] = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__lowercase , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
            ] , )
    # top_k caps the number of returned detections.
    @require_torch
    @slow
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : Dict = 2
        lowerCAmelCase_ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
        lowerCAmelCase_ : Optional[Any] = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__lowercase , )
        self.assertEqual(
            nested_simplify(__lowercase , decimals=4 ) , [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
            ] , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase: Optional[Any] = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase: Optional[Any] = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase: Tuple = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase: Optional[Any] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 255 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 262 | 0 |
from datetime import datetime
import requests
def lowerCamelCase__ ( url):
    """
    Fetch the raw bytes of an Instagram video/IGTV post via the downloadgram
    API.  ``url`` is the public post URL; returns the video content as bytes.

    The original body read the unbound names ``url`` and a placeholder for the
    resolved source URL; both are restored here.  Performs two network calls.
    """
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    # resolve the direct video source URL from the downloader API response
    video_url = requests.get(base_url + url).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url).content
if __name__ == "__main__":
a_ = input('Enter Video/IGTV url: ').strip()
a_ = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''') | 76 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map of canonical ERNIE-M checkpoint names to their hosted config files.
_UpperCAmelCase : Dict ={
    """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
    """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration for ERNIE-M models.

    Stores the architecture hyper-parameters (vocabulary, hidden sizes,
    attention layout, dropout rates) consumed by the ErnieM model classes.
    The original ``__init__`` declared every parameter under one duplicated
    placeholder name (a SyntaxError) and never assigned the attributes to
    ``self``; the canonical ErnieMConfig signature is restored here.
    """

    # PretrainedConfig hooks: model identifier and legacy-name remapping.
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

# Map from checkpoint name to the URL of its hosted config.json.
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    """Configuration for a BART model.

    Defaults correspond to the ``facebook/bart-large`` checkpoint.
    """

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map generic attribute names onto the BART-specific config attributes.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for BART.

    Supplies input/output axis descriptions and dummy inputs for the
    ``default``/``seq2seq-lm``, ``causal-lm`` and classification/QA tasks.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a past, the decoder only receives the last token.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model outputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy encoder + decoder inputs, with past key/values when enabled."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy decoder-only inputs, extending the attention mask over the past."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a dummy batch; -1 axes are replaced by fixed dimensions."""
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation on the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # Seq2seq tasks use the seq2seq flattening; other tasks fall back to the
        # decoder-only behaviour from OnnxConfigWithPast.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 25 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Holds sizing parameters and builds the kwargs dict for the
    MobileNetV1 image-processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # Fall back to the default resize/crop geometry when not overridden.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for ``MobileNetVaImageProcessor`` covering PIL, numpy and
    torch inputs, both batched and unbatched."""

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # kwargs passed to from_dict override the serialized values
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from collections.abc import Sequence
def lowercase__ ( __snake_case : List[str] = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
UpperCAmelCase_ : List[Any] = nums[0]
for i in range(1 , len(lowerCAmelCase_ ) ):
UpperCAmelCase_ : Union[str, Any] = nums[i]
UpperCAmelCase_ : str = max(lowerCAmelCase_ , ans + num , lowerCAmelCase_ )
return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
| 29 |
from __future__ import annotations
import math
class SegmentTree:
    """Segment tree with lazy propagation over a 1-indexed array of ``size``
    elements, supporting range-assignment updates and range-maximum queries.

    All public methods take the node index (root is 1) and the node's
    [left_element, right_element] span explicitly, mirroring the call style
    ``tree.query(1, 1, size, a, b)``.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for _ in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for _ in range(0, 4 * size)]
        self.flag = [0 for _ in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        """Index of the left child of node ``idx``."""
        return idx * 2

    def right(self, idx: int) -> int:
        """Index of the right child of node ``idx``."""
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list) -> None:
        """Populate the tree for ``a`` (0-indexed list, spanned 1-indexed)."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign ``val`` to every position in [a, b]; always returns True."""
        # Push a pending lazy assignment down before touching this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum value over positions [a, b]."""
        # Push a pending lazy assignment down before reading this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    # Demo: build over a fixed array, then run a few queries and
    # range-assignment updates.
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used by the batch-encoding tests below
# (the test methods read this via `return_tensors=FRAMEWORK`).
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
A_ : Union[str, Any] = PerceiverTokenizer
A_ : Optional[Any] = False
def a (self : Any ):
"""simple docstring"""
super().setUp()
__snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a (self : int ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def a (self : str , **a__ : Optional[Any] ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def a (self : Any , a__ : Union[str, Any] , a__ : List[str]=False , a__ : List[Any]=20 , a__ : Dict=5 ):
"""simple docstring"""
__snake_case = []
for i in range(len(__lowercase ) ):
try:
__snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case = list(filter(lambda a__ : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowercase ) )
__snake_case = list(filter(lambda a__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowercase ) , __lowercase ) )
if max_length is not None and len(__lowercase ) > max_length:
__snake_case = toks[:max_length]
if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
while len(__lowercase ) < min_length:
__snake_case = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case = [t[0] for t in toks]
# Ensure consistency
__snake_case = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
if " " not in output_txt and len(__lowercase ) > 1:
__snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
)
if with_prefix_space:
__snake_case = ''' ''' + output_txt
__snake_case = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
return output_txt, output_ids
def a (self : str ):
"""simple docstring"""
__snake_case = self.perceiver_tokenizer
__snake_case = '''Unicode €.'''
__snake_case = tokenizer(__lowercase )
__snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
__snake_case = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]Unicode €.[SEP]''' )
__snake_case = tokenizer('''e è é ê ë''' )
__snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
__snake_case = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = self.perceiver_tokenizer
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
if FRAMEWORK != "jax":
__snake_case = list(batch.input_ids.numpy()[0] )
else:
__snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowercase , __lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def a (self : int ):
"""simple docstring"""
__snake_case = self.perceiver_tokenizer
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowercase )
self.assertIn('''attention_mask''' , __lowercase )
self.assertNotIn('''decoder_input_ids''' , __lowercase )
self.assertNotIn('''decoder_attention_mask''' , __lowercase )
def a (self : str ):
"""simple docstring"""
__snake_case = self.perceiver_tokenizer
__snake_case = [
'''Summary of the text.''',
'''Another summary.''',
]
__snake_case = tokenizer(
text_target=__lowercase , max_length=32 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def a (self : str ):
"""simple docstring"""
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case = tempfile.mkdtemp()
__snake_case = ''' He is very happy, UNwant\u00E9d,running'''
__snake_case = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
__snake_case = tokenizer.__class__.from_pretrained(__lowercase )
__snake_case = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
shutil.rmtree(__lowercase )
__snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case = tempfile.mkdtemp()
__snake_case = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__snake_case = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
__snake_case = tokenizer.__class__.from_pretrained(__lowercase )
__snake_case = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowercase )
def a (self : Dict ):
"""simple docstring"""
__snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__snake_case = json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__snake_case = json.load(__lowercase )
__snake_case = [f"""<extra_id_{i}>""" for i in range(125 )]
__snake_case = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__snake_case = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case = tokenizer_class.from_pretrained(
__lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
__snake_case = tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
    def a (self : Optional[int] ):
        """Decoding byte id 178 — outside any valid UTF-8 sequence here — should yield the Unicode replacement character."""
        # NOTE(review): the value below is bound to a mangled name but read back as
        # `tokenizer` on the next line — restore consistent local names before running.
        __snake_case = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '''�''' )
    def a (self : Dict ):
        """Overridden as a no-op.

        NOTE(review): presumably disables an inherited common-tokenizer test that does not
        apply to this byte-level tokenizer — confirm against the test mixin.
        """
        pass
    def a (self : List[Any] ):
        """Overridden as a no-op.

        NOTE(review): presumably disables an inherited common-tokenizer test that does not
        apply to this byte-level tokenizer — confirm against the test mixin.
        """
        pass
    def a (self : Optional[Any] ):
        """Overridden as a no-op.

        NOTE(review): presumably disables an inherited common-tokenizer test that does not
        apply to this byte-level tokenizer — confirm against the test mixin.
        """
        pass
    def a (self : int ):
        """Overridden as a no-op.

        NOTE(review): presumably disables an inherited common-tokenizer test that does not
        apply to this byte-level tokenizer — confirm against the test mixin.
        """
        pass
    def a (self : int ):
        """Smoke test: convert_tokens_to_string on a token list should return a str."""
        # NOTE(review): several names here are mangled — the tokenizer list is bound to one
        # name but iterated as `tokenizers`, and `__lowercase` placeholders stand in for the
        # original literal arguments (fast=False, do_lower_case=True, the token list, `str`).
        # Restore them before this test can run.
        __snake_case = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                __snake_case = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                __snake_case = tokenizer.convert_tokens_to_string(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase )
| 24 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_UpperCAmelCase : Optional[int] ="""src/transformers"""
_UpperCAmelCase : str ="""docs/source/en"""
_UpperCAmelCase : Optional[int] ="""."""
def lowerCAmelCase ( filename , start_prompt , end_prompt ):
    """Extract the text in ``filename`` between ``start_prompt`` and ``end_prompt``.

    Returns ``(text, start_index, end_index, lines)`` where ``lines`` is the whole file
    and the indices delimit the extracted slice; the prompt lines themselves and blank
    lines adjacent to them are excluded.

    The original signature repeated one parameter name three times (a SyntaxError) and
    carried a return annotation referencing an unimported ``List``; both are fixed, with
    names matching the keyword call site (``filename=..., start_prompt=..., end_prompt=...``).
    """
    with open(filename , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start prompt, then step past it.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    # Find the end prompt, then step back onto the last in-between line.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    # Trim newline-only (length <= 1) lines adjacent to both prompts.
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_UpperCAmelCase : Optional[Any] ="""Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
# NOTE(review): all the module constants below are assigned to the same mangled name, so
# each assignment clobbers the previous one, yet the function bodies read them as
# `_re_tf_models`, `_re_flax_models`, `_re_pt_models` and `transformers_module` —
# distinct names must be restored for this module to work.
_UpperCAmelCase : Optional[int] =re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_UpperCAmelCase : Dict =re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_UpperCAmelCase : Optional[Any] =re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[int] =direct_transformers_import(TRANSFORMERS_PATH)
def lowerCAmelCase ( lowerCAmelCase_ ):
    """Split a camel-cased name into its words, e.g. ``"TFBertModel" -> ["TF", "Bert", "Model"]``.

    The regex breaks before each lower→Upper transition and before the last capital of an
    acronym followed by a word (``"TFBert" -> "TF", "Bert"``).

    Fix: the original bound the iterator to a name shadowing the parameter and then read an
    undefined ``matches``; the iterator is now bound to ``matches`` as the body expects. The
    bogus ``Optional[int]`` return annotation (unimported, and wrong) is dropped.
    """
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , lowerCAmelCase_ )
    return [m.group(0 ) for m in matches]
def lowerCAmelCase ( text , width ):
    """Center ``text`` inside a table cell of ``width`` characters.

    The check/cross emojis render two columns wide in a terminal, so they count as
    length 2 regardless of ``len``.

    Fix: the original declared the same parameter name twice (a SyntaxError) while the body
    already read ``text`` and ``width``; the distinct names are restored.
    """
    text_length = 2 if text == '''✅''' or text == '''❌''' else len(text )
    left_indent = (width - text_length) // 2
    # Any odd leftover space goes to the right of the text.
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def lowerCAmelCase ( )-> str:
    """Build the Markdown support table (model x slow/fast tokenizer, PT/TF/Flax) for index.md.

    Fix: in the mangled original every local shared a single rebound name, so reads such as
    ``config_maping_names``, ``lookup_dict`` and the table accumulator were unbound; distinct
    local names are restored to match the reads the body already performs.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith('''Tokenizer''' ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('''TokenizerFast''' ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            # Strip trailing words until the remaining prefix matches a known model name.
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''''''.join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = '''|''' + '''|'''.join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + '''|\n'''
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
    check = {True: '''✅''', False: '''❌'''}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def lowerCAmelCase ( overwrite=False ):
    """Compare the auto-generated model table with the one in ``index.md``.

    When ``overwrite`` is True the stale table is rewritten in place; otherwise a mismatch
    raises so CI can ask the user to run ``make fix-copies``.

    Fix: in the mangled original the four unpack targets collapsed to one name,
    ``current_table``/``new_table`` were read unbound, and the ``overwrite`` boolean was
    joined into the docs path. NOTE(review): the docs-directory constant is name-mangled at
    module level; ``PATH_TO_DOCS`` here is the value assigned near the top of this file
    (``docs/source/en``) — restore that constant's name as well.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
    # NOTE(review): the parser/args values below are bound under a mangled name but read back
    # as `parser` and `args` — restore consistent names before running this as a script.
    _UpperCAmelCase : List[Any] =argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    _UpperCAmelCase : Tuple =parser.parse_args()
    check_model_table(args.fix_and_overwrite)
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE_: int =getLogger(__name__)
def lowerCAmelCase_ (
    data_dir ,
    save_dir: str ,
    model_name: str ,
    bs: int = 8 ,
    max_source_length: int = 10_24 ,
    type_path="val" ,
    n_obs=None ,
    fpaa=False ,
    task="summarization" ,
    local_rank=None ,
    num_return_sequences=1 ,
    dataset_kwargs: Dict = None ,
    prefix="" ,
    **generate_kwargs ,
):
    """Run distributed generation over one data split and dump this rank's predictions.

    Each rank writes ``rank_<local_rank>_output.json`` under ``save_dir`` and returns
    ``(results, num_replicas)``.

    Fix: the original signature repeated a single mangled parameter name (a SyntaxError);
    distinct names are restored to match the reads the body already performs (``local_rank``,
    ``fpaa``, ``num_return_sequences``, ``max_source_length``, ``prefix``,
    ``generate_kwargs``), and ``dataset_kwargs=None`` no longer crashes on unpacking.
    """
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl" , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).cuda()
    if fpaa:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams" , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ''''''
    if dataset_kwargs is None:
        # Robustness: the default None previously crashed at ``**dataset_kwargs``.
        dataset_kwargs = {}
    ds = SeqaSeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=10_24 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch['''ids''']
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({"pred": pred, "id": ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
def lowerCAmelCase_ ( ):
    """CLI entry: run multi-GPU evaluation, then (on rank 0) gather, score and save results.

    Fix: in the mangled original every local shared a single rebound name, so ``parser``,
    ``args``, ``json_save_dir`` etc. were read while unbound and ``metrics["n_obs"]`` was
    read but never written; distinct names matching those reads are restored. The
    ``fpaa=args.fpaa`` call is corrected to ``args.fp16`` to match the declared ``--fp16``
    flag. NOTE(review): this calls ``eval_data_dir``/``gather_results_from_each_node``/
    ``combine_partial_results`` as the body always did — the corresponding defs in this file
    are name-mangled and must be renamed back for the linkage to work.
    """
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
    parser.add_argument("--data_dir" , type=str , help="like cnn_dm/test.source" )
    parser.add_argument(
        "--model_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
    parser.add_argument("--save_dir" , type=str , help="where to save" , default="tmp_gen" )
    parser.add_argument("--max_source_length" , type=int , default=None )
    parser.add_argument(
        "--type_path" , type=str , default="test" , help="which subset to evaluate typically train/val/test" )
    parser.add_argument("--task" , type=str , default="summarization" , help="used for task_specific_params + metrics" )
    parser.add_argument("--bs" , type=int , default=8 , required=False , help="batch size" )
    parser.add_argument(
        "--local_rank" , type=int , default=-1 , required=False , help="should be passed by distributed.launch" )
    parser.add_argument(
        "--n_obs" , type=int , default=None , required=False , help="How many observations. Defaults to all." )
    parser.add_argument(
        "--num_return_sequences" , type=int , default=1 , required=False , help="How many sequences to return" )
    parser.add_argument(
        "--sync_timeout" , type=int , default=6_00 , required=False , help="How long should master process wait for other processes to finish." , )
    parser.add_argument("--src_lang" , type=str , default=None , required=False )
    parser.add_argument("--tgt_lang" , type=str , default=None , required=False )
    parser.add_argument(
        "--prefix" , type=str , required=False , default=None , help="will be added to the begininng of src examples" )
    parser.add_argument("--fp16" , action="store_true" )
    parser.add_argument("--debug" , action="store_true" )
    start_time = time.time()
    # Unknown flags (e.g. --num_beams=2) are forwarded to model.generate via generate_kwargs.
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
    json_save_dir = Path(args.save_dir + "_tmp" )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json" ) )
    if intermediate_files:
        raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results, num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(json_save_dir , num_replicas , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json" )
            print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + ".target" )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics = score_fn(preds , labels )
        metrics["n_obs"] = len(preds )
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"] , 4 )
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(f"""{args.type_path}.target""" ) )
        else:
            shutil.rmtree(json_save_dir )
def lowerCAmelCase_ ( partial_results ) -> List:
    """Flatten each rank's list of ``{"pred", "id"}`` records and return preds ordered by id.

    Fix: the original sort lambda ignored its parameter and read an undefined ``x``, the
    loop iterated an unbound ``partial_results``, and ``records`` was extended with the
    (mangled) function name instead of the inner list; the names now match their reads.
    """
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    # Ranks finish in arbitrary order, so restore the original example order by id.
    records = sorted(records , key=lambda x: x["id"] )
    preds = [x['''pred'''] for x in records]
    return preds
def lowerCAmelCase_ ( save_dir , num_replicas , timeout ) -> List[Dict[str, List]]:
    """Wait up to ``timeout`` seconds for every rank's ``rank_*.json`` and load them all.

    Returns the list of per-rank JSON payloads, or raises ``TimeoutError`` if the other
    processes never finish.

    Fix: the original signature repeated one mangled parameter name three times (a
    SyntaxError); distinct names are restored to match the reads the body already performs
    (``save_dir``, ``num_replicas``, ``timeout``). The JSON load is restored to
    ``lmap(load_json, json_files)`` per the surrounding utils.
    """
    start_wait = time.time()
    logger.info("waiting for all nodes to finish" )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json" ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            # A rank is still mid-write; poll again.
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
    # Usage for MT:
    # NOTE(review): no function named `run_generate` is defined in this file as written —
    # the entry point above is name-mangled; restore its name before running as a script.
    run_generate()
| 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCAmelCase ( ):
    """Entry point for the ``transformers-cli`` tool: register subcommands, parse, dispatch.

    Fix: in the mangled original every local shared one name, so the sub-parsers assignment
    clobbered the parser and ``parser``/``args``/``service`` were read while unbound;
    distinct local names matching those reads are restored. The bogus ``-> int`` annotation
    is dropped (the function returns ``None``).
    """
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        # No subcommand given: show usage and exit with an error status.
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    # NOTE(review): no function named `main` is defined in this file as written — the entry
    # point above is name-mangled; restore its name before running as a script.
    main()
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
a : Dict = logging.get_logger(__name__)
# Names of the files that make up this tokenizer's vocabulary on disk.
# NOTE(review): every module constant here is assigned to the same mangled name `a`, so each
# later assignment clobbers the earlier ones, while the class body reads them via distinct
# names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) — restore distinct names.
a : Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs for the pretrained checkpoint's vocabulary files.
a : List[str] = {
    """vocab_file""": {
        """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """Salesforce/codegen-350M-mono""": (
            """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length (positional-embedding size) per checkpoint.
a : int = {
    """Salesforce/codegen-350M-mono""": 2048,
}
class a ( UpperCAmelCase__ ):
    """Fast (Rust-backed) CodeGen tokenizer wrapper.

    NOTE(review): identifiers in this class appear name-mangled — several signatures repeat
    the same parameter name (a SyntaxError as written), and locals are bound under one name
    but read under another (e.g. ``pre_tok_state``, ``model_id``). The documentation below
    describes the apparent upstream intent; verify against the original implementation.
    """

    # All five class attributes are assigned to the same mangled name, so only the last
    # assignment survives; upstream these are distinct attributes (vocab_files_names,
    # pretrained maps, max input sizes, model_input_names, slow_tokenizer_class).
    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = ["""input_ids""", """attention_mask"""]
    snake_case_ = CodeGenTokenizer
    def __init__( self : int , lowercase_ : Union[str, Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]="<|endoftext|>" , lowercase_ : int="<|endoftext|>" , lowercase_ : str="<|endoftext|>" , lowercase_ : str=False , **lowercase_ : int , ):
        """Build the fast tokenizer; rejects ``add_bos_token`` and syncs ``add_prefix_space``."""
        super().__init__(
            __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
        # Adding a BOS token is unsupported by the fast backend; fail loudly with guidance.
        if kwargs.pop('''add_bos_token''' , __lowercase ):
            snake_case_ = kwargs.pop('''name_or_path''' , '''''' )
            raise ValueError(
                '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        # Rebuild the pre-tokenizer if its serialized add_prefix_space disagrees with the argument.
        snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space:
            snake_case_ = getattr(__lowercase , pre_tok_state.pop('''type''' ) )
            snake_case_ = add_prefix_space
            snake_case_ = pre_tok_class(**__lowercase )
        snake_case_ = add_prefix_space
    def A_ ( self : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Any ):
        """Batch-encode; pretokenized input requires ``add_prefix_space=True``."""
        snake_case_ = kwargs.get('''is_split_into_words''' , __lowercase )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*__lowercase , **__lowercase )
    def A_ ( self : str , *lowercase_ : Dict , **lowercase_ : str ):
        """Encode a single sequence; pretokenized input requires ``add_prefix_space=True``."""
        snake_case_ = kwargs.get('''is_split_into_words''' , __lowercase )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*__lowercase , **__lowercase )
    def A_ ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : List[str] = None ):
        """Save the underlying tokenizer model files; returns the written paths as a tuple."""
        snake_case_ = self._tokenizer.model.save(__lowercase , name=__lowercase )
        return tuple(__lowercase )
    def A_ ( self : Optional[int] , lowercase_ : int , lowercase_ : List[str] = False , lowercase_ : Dict = None , lowercase_ : Union[str, Any] = None , **lowercase_ : Dict , ):
        """Decode ids to text, optionally truncating at the first ``truncate_before_pattern`` hit."""
        snake_case_ = super().decode(
            token_ids=__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
        if truncate_before_pattern is not None and len(__lowercase ) > 0:
            snake_case_ = self.truncate(__lowercase , __lowercase )
        return decoded_text
    def A_ ( self : Dict , lowercase_ : str , lowercase_ : Tuple ):
        """Cut a completion at the second top-level ``print``/``def`` or the earliest terminal regex."""
        def find_re(lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : str ):
            # Return the match start offset, or -1 when the pattern does not occur.
            snake_case_ = pattern.search(__lowercase , __lowercase )
            return m.start() if m else -1
        snake_case_ = [re.compile(__lowercase , re.MULTILINE ) for pattern in truncate_before_pattern]
        snake_case_ = list(re.finditer('''^print''' , __lowercase , re.MULTILINE ) )
        if len(__lowercase ) > 1:
            snake_case_ = completion[: prints[1].start()]
        snake_case_ = list(re.finditer('''^def''' , __lowercase , re.MULTILINE ) )
        if len(__lowercase ) > 1:
            snake_case_ = completion[: defs[1].start()]
        snake_case_ = 0
        snake_case_ = [
            pos for pos in [find_re(__lowercase , __lowercase , __lowercase ) for terminal in terminals] if pos != -1
        ]
        if len(__lowercase ) > 0:
            return completion[: min(__lowercase )]
        else:
            return completion
| 56 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # Fall back to None when sentencepiece is missing; the slow-tokenizer class is optional.
    # NOTE(review): this (and every constant below) is assigned to the same mangled name, so
    # later assignments clobber earlier ones while the class body reads distinct names
    # (VOCAB_FILES_NAMES, FAIRSEQ_LANGUAGE_CODES, ...) — restore distinct names.
    _UpperCAmelCase : Tuple =None
_UpperCAmelCase : int =logging.get_logger(__name__)
# Names of the files that make up this tokenizer's vocabulary on disk.
_UpperCAmelCase : Dict ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs for the pretrained checkpoint's vocabulary files.
_UpperCAmelCase : Any ={
    """vocab_file""": {
        """facebook/nllb-200-distilled-600M""": (
            """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """facebook/nllb-200-distilled-600M""": (
            """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length (positional-embedding size) per checkpoint.
_UpperCAmelCase : int ={
    """facebook/nllb-large-en-ro""": 1024,
    """facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
_UpperCAmelCase : Any =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", 
"""mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class snake_case__( UpperCAmelCase__ ):
    """Fast NLLB tokenizer (appears derived from a fast pretrained-tokenizer base class).

    NOTE(review): identifiers in this class are name-mangled — all class attributes below
    share one name (only the last assignment survives) and several method signatures repeat
    a parameter name. Upstream these are distinct attributes: vocab_files_names, size map,
    vocab-file map, model_input_names, slow_tokenizer_class, prefix_tokens, suffix_tokens.
    """
    SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
    SCREAMING_SNAKE_CASE__ : int = NllbTokenizer
    SCREAMING_SNAKE_CASE__ : List[int] = []
    SCREAMING_SNAKE_CASE__ : List[int] = []
def __init__( self , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=False , **__lowercase , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : int = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
lowerCAmelCase_ : List[Any] = legacy_behaviour
super().__init__(
vocab_file=__lowercase , tokenizer_file=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , additional_special_tokens=__lowercase , legacy_behaviour=__lowercase , **__lowercase , )
lowerCAmelCase_ : Any = vocab_file
lowerCAmelCase_ : List[Any] = False if not self.vocab_file else True
lowerCAmelCase_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
lowerCAmelCase_ : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(__lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase_ : Any = src_lang if src_lang is not None else '''eng_Latn'''
lowerCAmelCase_ : str = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase_ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def lowercase_ ( self ) -> str:
        """The active source-language code (e.g. ``"eng_Latn"``)."""
        return self._src_lang
@src_lang.setter
def lowercase_ ( self , __lowercase ) -> None:
lowerCAmelCase_ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : int = self(__lowercase , add_special_tokens=__lowercase , return_tensors=__lowercase , **__lowercase )
lowerCAmelCase_ : Dict = self.convert_tokens_to_ids(__lowercase )
lowerCAmelCase_ : List[Any] = tgt_lang_id
return inputs
def lowercase_ ( self , __lowercase , __lowercase = "eng_Latn" , __lowercase = None , __lowercase = "fra_Latn" , **__lowercase , ) -> BatchEncoding:
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase )
def lowercase_ ( self ) -> List[Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase_ ( self , __lowercase ) -> None:
lowerCAmelCase_ : List[str] = self.convert_tokens_to_ids(__lowercase )
if self.legacy_behaviour:
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase_ : Optional[int] = [self.cur_lang_code]
lowerCAmelCase_ : List[Any] = [self.eos_token_id]
lowerCAmelCase_ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , __lowercase ) -> None:
lowerCAmelCase_ : Dict = self.convert_tokens_to_ids(__lowercase )
if self.legacy_behaviour:
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : Any = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase_ : Any = [self.cur_lang_code]
lowerCAmelCase_ : Any = [self.eos_token_id]
lowerCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCAmelCase_ : Any = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,) | 262 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
    from transformers.image_utils import load_image
else:
    # Placeholder so later references do not fail when vision deps are missing.
    class SCREAMING_SNAKE_CASE__ :
        """Stand-in used when vision dependencies are unavailable."""

        @staticmethod
        def lowercase__ ( *args , **kwargs ):
            """No-op placeholder for image operations.

            The original declared `*snake_case__ , **snake_case__` — a duplicate
            parameter name, i.e. a SyntaxError; renamed to `*args, **kwargs`.
            """
            pass
def a__ ( image ):
    """Deterministic no-op image "hash" so pipeline tests do not depend on image identity.

    The original parameter `SCREAMING_SNAKE_CASE` shadowed the module-level
    placeholder class of the same name; renamed to `image`.
    """
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# Renamed from the obfuscated `lowerCAmelCase__`: every test below refers to `INVOICE_URL`.
INVOICE_URL = (
    """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for the `document-question-answering` pipeline.

    Repairs to the obfuscated original: duplicate `snake_case__` parameters
    (SyntaxErrors) were given distinct names, and the colliding `lowercase__`
    method names were replaced with unique `test_*` names so unittest can
    actually discover and run each test.
    """

    # Canonical mixin attribute name; the obfuscated `a : int` annotation was wrong
    # and the name was never read by anything.
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a DQA pipeline plus examples covering PIL image, URL and explicit word boxes."""
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )
        image = INVOICE_URL
        # NOTE(review): second argument reconstructed as `None` (no OCR language hint),
        # matching the upstream test — the obfuscated original passed an undefined name.
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        """Every example must produce `top_k` answer dicts with score/answer/start/end keys."""
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_small_model_pt(self):
        """Tiny random LayoutLMv2: fixed (meaningless) answers; empty output on a blank image."""
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt(self):
        """Finetuned LayoutLMv2 DocVQA checkpoint answers the invoice-number question."""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        """Same checkpoint with a small `max_seq_len`, forcing chunked inference."""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        """LayoutLM (v1) DocVQA checkpoint; also accepts precomputed word boxes with no image."""
        # NOTE(review): `add_prefix_space=True` reconstructed from the upstream test —
        # the obfuscated original passed an undefined name.
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        """LayoutLM checkpoint with chunking (`max_seq_len=50`)."""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        """Donut (generative) DocVQA checkpoint returns only an answer string."""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 108 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Generic aliases used throughout the parser's type hints. The obfuscated original
# bound both NewTypes to the same `_UpperCAmelCase` name, leaving the `DataClass` /
# `DataClassType` names referenced below undefined.
DataClass = NewType("""DataClass""", Any)
DataClassType = NewType("""DataClassType""", Any)
def string_to_bool(v) -> bool:
    """Parse a truthy/falsy CLI string (yes/no, true/false, t/f, y/n, 1/0) into a bool.

    Booleans pass through unchanged; unrecognized strings raise ArgumentTypeError.
    Fixes `isinstance(v, v)` (a TypeError at runtime) to `isinstance(v, bool)`, and
    restores the name `string_to_bool` that the parser class below references.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."""
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Return a converter mapping a CLI string back to the matching choice.

    Matching is by each choice's `str()` form; unknown strings pass through
    unchanged. Restores the name `make_choice_type_function` that the parser
    class below references.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def lowerCAmelCase ( *, aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ) -> dataclasses.Field:
    """Create a `dataclasses.field` carrying `aliases`/`help` in its metadata.

    The original declared five keyword-only parameters all named `lowerCAmelCase_`
    (a SyntaxError); names restored from the body's own references. All parameters
    are keyword-only.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class snake_case__( ArgumentParser ):
    """Argparse subclass that builds CLI arguments from dataclass fields.

    NOTE(review): the obfuscated original subclassed the undefined name
    `UpperCAmelCase__`; every call below (`add_argument`, `add_argument_group`,
    `parse_known_args`) is `argparse.ArgumentParser` API, so that base is
    restored here — confirm against the upstream file.

    Methods were all named `lowercase_` (so only the last survived) and several
    signatures repeated the parameter name `__lowercase` (SyntaxErrors); names
    are restored from the internal call sites (`self._add_dataclass_arguments`,
    `self._parse_dataclass_field`, `self.parse_dict`, ...).
    """

    # The dataclass types this parser fills when parsing.
    dataclass_types: "Iterable[DataClassType]"

    def __init__( self , dataclass_types: "Union[DataClassType, Iterable[DataClassType]]" , **kwargs ) -> None:
        """Create the parser and register one argument group per dataclass."""
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )

    @staticmethod
    def _parse_dataclass_field( parser , field ) -> None:
        """Translate one dataclass field into a `parser.add_argument` call."""
        field_name = f"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(origin_type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    f""" Problem encountered in field '{field.name}'.""" )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **bool_kwargs )

    def _add_dataclass_arguments( self , dtype ) -> None:
        """Resolve type hints for `dtype` and register an argument per init field."""
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )

    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> "Tuple[DataClass, ...]":
        """Parse command-line args (plus optional `.args` files) into dataclass instances.

        Returns one instance per registered dataclass, then (optionally) a namespace
        of leftover attributes, then (if requested) the unconsumed argument strings.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)

    def parse_dict( self , args , allow_extra_keys = False ) -> "Tuple[DataClass, ...]":
        """Instantiate the registered dataclasses from a dict instead of argv."""
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
        return tuple(outputs )

    def parse_json_file( self , json_file , allow_extra_keys = False ) -> "Tuple[DataClass, ...]":
        """Load a JSON file and delegate to `parse_dict`."""
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )

    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ) -> "Tuple[DataClass, ...]":
        """Load a YAML file and delegate to `parse_dict`."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Placeholder so later references do not fail when vision deps are missing.
    class __lowerCAmelCase :
        """Stand-in used when vision dependencies are unavailable."""

        @staticmethod
        def lowerCamelCase ( *args , **kwargs ):
            """No-op placeholder for image operations.

            The original declared `*__UpperCAmelCase , **__UpperCAmelCase` — a
            duplicate parameter name, i.e. a SyntaxError; renamed to `*args, **kwargs`.
            """
            pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__lowerCamelCase = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = object_detector(examples[0] , threshold=0.0 )
__lowerCamelCase = len(__lowercase )
self.assertGreater(__lowercase , 0 )
self.assertEqual(
__lowercase , [
{
'''score''': ANY(__lowercase ),
'''label''': ANY(__lowercase ),
'''box''': {'''xmin''': ANY(__lowercase ), '''ymin''': ANY(__lowercase ), '''xmax''': ANY(__lowercase ), '''ymax''': ANY(__lowercase )},
}
for i in range(__lowercase )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__lowerCamelCase = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
__lowerCamelCase = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline('''zero-shot-object-detection''' )
__lowerCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
__lowerCamelCase = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF")
def test_small_model_tf(self):
    """Placeholder so the missing TF implementation is reported as a skip.

    NOTE(review): renamed from ``lowerCamelCase`` — three methods in this class
    shared that name, so only the last one survived class creation; unittest
    discovery also requires a ``test_`` prefix. Confirm no external caller
    relied on the old name.
    """
    pass
@require_torch
@slow
def test_threshold(self):
    """Integration test: ``threshold=0.2`` drops detections scoring below it.

    NOTE(review): the mangled source assigned every local to ``__lowerCamelCase``
    and then referenced undefined names (``object_detector``, ``__lowercase``);
    distinct local names are restored from the names the body itself uses.
    Renamed from ``lowerCamelCase`` to avoid shadowing sibling methods and to
    make the test discoverable.
    """
    threshold = 0.2
    object_detector = pipeline("zero-shot-object-detection")

    outputs = object_detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=threshold,
    )
    # Pinned expected detections (scores rounded to 4 decimals).
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
            {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
        ],
    )
@require_torch
@slow
def test_top_k(self):
    """Integration test: ``top_k=2`` returns only the two best detections.

    NOTE(review): the mangled source assigned every local to ``__lowerCamelCase``
    and then referenced undefined names (``object_detector``, ``__lowercase``);
    distinct local names are restored from the names the body itself uses.
    Renamed from ``lowerCamelCase`` to avoid shadowing sibling methods and to
    make the test discoverable.
    """
    top_k = 2
    object_detector = pipeline("zero-shot-object-detection")

    outputs = object_detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        top_k=top_k,
    )
    # Pinned expected detections (scores rounded to 4 decimals).
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
            {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
        ],
    )
| 330 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase(lowerCAmelCase_):
    """Factory used via argparse ``set_defaults(func=...)``: build the env command.

    The parsed-args parameter is part of the CLI callback protocol but is unused
    here. The original returned the undefined name ``EnvironmentCommand`` and
    carried a ``-> Union[str, Any]`` annotation without importing ``typing``,
    both of which raise ``NameError``; the command class defined below in this
    file is returned instead and the broken annotation is dropped.
    """
    return snake_case__()
class snake_case__( UpperCAmelCase__ ):
    """``diffusers env`` CLI command.

    Collects installed-library versions and platform details and prints them in
    the copy-paste form expected by GitHub issue templates.

    NOTE(review): the mangled source named all three methods ``lowercase_`` (so
    later defs shadowed earlier ones) and assigned every local to
    ``lowerCAmelCase_`` while reading the real names; distinct method and local
    names are restored from the names the bodies themselves use — confirm the
    method names against the CLI base class's expectations.
    """

    @staticmethod
    def register_subcommand(__lowercase):
        """Attach the ``env`` sub-command to *__lowercase* (an argparse subparsers object)."""
        download_parser = __lowercase.add_parser("env")
        # lowerCAmelCase is the module-level factory that builds this command.
        download_parser.set_defaults(func=lowerCAmelCase)

    def run(self):
        """Gather environment info, print it in issue-template form, and return the dict."""
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        """Render *d* as a Markdown bullet list, one ``- key: value`` line per entry."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
"""simple docstring"""
def __UpperCAmelCase(discount_rate, cash_flows) -> float:
    """Return the net present value (NPV) of ``cash_flows`` at ``discount_rate``.

    Cash flow ``i`` is discounted by ``(1 + discount_rate) ** i``; the first
    flow (index 0) is undiscounted, i.e. it occurs now. The result is rounded
    to two decimal places.

    >>> __UpperCAmelCase(0.0, [10.0, 20.0])
    30.0
    >>> __UpperCAmelCase(0.1, [-1000.0, 500.0, 500.0])
    -132.23

    Raises:
        ValueError: if ``discount_rate`` is negative or ``cash_flows`` is empty.
    """
    # NOTE(review): the original signature declared the same parameter name
    # twice (a SyntaxError) and the body referenced the undefined name
    # ``lowerCAmelCase_``; parameter names are restored from the names the
    # body itself reads (``discount_rate``, ``cash_flows``).
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 16 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case__( unittest.TestCase ):
    """Regression tests for ``JukeboxTokenizer``.

    Tokenizing one fixed artist/genres/lyrics metadata dict must reproduce the
    pinned token-id tensors for the 1b-lyrics and 5b-lyrics checkpoints.

    NOTE(review): the mangled source bound both class attributes to the same
    name (``SCREAMING_SNAKE_CASE__``, so the first was lost), named both test
    methods identically (so the first was shadowed), and assigned every local
    to ``lowerCAmelCase_`` while reading ``tokenizer``/``tokens``/
    ``EXPECTED_OUTPUT``. Names are restored from what the bodies read; the
    attribute ``metas`` is required by ``self.metas`` below, while
    ``tokenizer_class`` is a presumed name — confirm against the test mixin.
    """

    tokenizer_class = JukeboxTokenizer
    # Fixed metadata fed to the tokenizer in both tests; the lyrics text is
    # part of the pinned expectation and must not be reformatted.
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self):
        """Tokenization with the 1b-lyrics checkpoint matches the pinned ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
                7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
                4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
                4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
                3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
                4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
                4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
                7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
                7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
                6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
                3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
                2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
                3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
                2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
                4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
                4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
                7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
                3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
                4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
                4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
                3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
                4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
                3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
                3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
                4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
                3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
                7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
                4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
                4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
                4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
                4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
                4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
                4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
                2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
                7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
                4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
                7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
                3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
                4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
                4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
                4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
                3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
                4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
                2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
                7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
                7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
                7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                7_6, 7_6]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        """Tokenization with the 5b-lyrics checkpoint matches the pinned ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
                3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
                3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
                4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
                7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
                7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
                2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
                3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
                3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
                7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
                7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
                4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
                7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
                7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
                7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
                6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
                4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
                4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
                3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
                3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
                4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
                4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
                4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
                3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
                3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
                4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
                3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
                3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
                4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
                2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
                3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
                3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
                3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
                3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
                4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
                3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
                1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
                1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
                4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
                4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
                4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
                4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
                2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
                3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
                7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
                3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
                4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
                7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
                4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
                7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
                2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
                7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.