code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# The sentencepiece word-boundary marker; the tokenizer tests below reference
# this constant as SPIECE_UNDERLINE (the obfuscated dump had renamed it).
SPIECE_UNDERLINE = "▁"

# Path to the sentencepiece fixture model used to build the test tokenizer.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the sentencepiece-based ``BertGenerationTokenizer``."""

    # Hooks read by TokenizerTesterMixin (the dump had collapsed all three names
    # to `A_`, so only the last assignment survived).
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False  # no fast (Rust) tokenizer for this model
    test_sentencepiece = True

    def setUp(self):
        """Build a tokenizer from the fixture model and save it for the mixin."""
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` must round-trip through id 1."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        # Decoding back maps out-of-vocab pieces ("9", "é") to <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        """Pretrained checkpoint tokenizer shared by the slow integration tests."""
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112,
            985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246,
            1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324,
            497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597,
            3200, 3129, 1172,
        ]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build a short sequence out of the first vocab entries.
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        # The randomly initialised model must cover the tokenizer's vocabulary.
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 266 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of a diamond: `n` rows of left-padded stars.

    The dump had renamed this to `lowerCAmelCase__` and left the inner loops
    reading an undefined global `n`; the parameter is now used throughout, and
    the name matches the `floyd(...)` call in `pretty_print`.
    """
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
def reverse_floyd(n):
    """Print the lower half of a diamond: rows of stars shrinking from `n` to 1.

    After each row's newline the indentation for the *next* row is emitted.
    (Name restored so the `reverse_floyd(...)` call in `pretty_print` resolves;
    the undefined global `n` is replaced by the parameter.)
    """
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
def pretty_print(n):
    """Print a full diamond of side `n`, or a notice for non-positive input.

    Name restored (the `__main__` block below calls `pretty_print`); delegates
    to `floyd` / `reverse_floyd` for the two halves.
    """
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(R'| /\ | |- | |- |--| |\ /| |-')
    print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    # Interactive loop: keep drawing diamonds until the user enters 0.
    # (The dump assigned the loop flag to `__snake_case` while the `while`
    # condition read an undefined `K`; the names are now consistent.)
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
| 129 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
# Module logger; the tokenizer class below reports via `logger`.
logger = logging.get_logger(__name__)

# File names the fast tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class __lowerCamelCase(PreTrainedTokenizerFast):
    """Fast (Rust-backed) MVP tokenizer, a byte-level BPE tokenizer.

    Parameter and method names are restored from the body's own references
    (the obfuscated dump had duplicate `lowerCamelCase` parameters, which is a
    SyntaxError, and every method was named `A__`, so later definitions
    clobbered earlier ones and `@mask_token.setter` referenced a property that
    no longer existed).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer if the stored `add_prefix_space` disagrees
        # with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token lstrip'ed so "<mask> x" and "<mask>x" behave alike.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs."""
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs."""
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend model files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """MVP (like BART/RoBERTa) does not use token types: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Configuration holder for the image-processor tests below.

    Renamed from the obfuscated `__lowerCamelCase` to the name the test class
    actually instantiates; the dump's `__init__` also had duplicate parameter
    names (a SyntaxError) and assigned locals instead of instance attributes —
    the sibling class reads e.g. `self.image_processor_tester.batch_size`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Kwargs used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class __lowerCamelCase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `MobileNetVaImageProcessor` (PIL / numpy / torch inputs).

    Base mixin and method names restored: the dump's base `__lowercase` was
    undefined (the mixin is imported above), and every method was named `A__`,
    so only the final definition survived.
    """

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # Integer kwargs are expanded into the dict form by `from_dict`.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert the two models' gradients are (not) in sync depending on `did_step`.

    Name and parameters restored: the dump declared four parameters all named
    `_UpperCAmelCase` (a SyntaxError), while the body and the callers below
    use the original names.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    """One forward/backward step on an MSE regression loss.

    When `do_backward` is False the loss is scaled down by the accumulation
    steps and `.backward()` is called directly; otherwise the backward pass is
    delegated to `accelerator.backward`. (Default kept as in the source.)
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Build a reference model, its DDP copy, a dataloader and (optionally)
    optimizers + LR schedulers, with everything but the reference model passed
    through `accelerator.prepare`."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        lr_sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, lr_sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process mirrors what `prepare` does.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """Check `gradient_state.active_dataloader` tracking across nested loops."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration switches the active dataloader, then restores it.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Run the gradient-accumulation test-suite appropriate for the current
    distributed setup.

    NOTE(review): the original definition was mangled to ``A_`` — the same name
    as the two sibling functions, so the later definitions shadowed this one and
    ``main()`` in the ``__main__`` guard recursed into ``_mp_fn``. Real names
    restored; all runtime strings kept byte-for-byte.
    """
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs); the spawn index is unused."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 13 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length, remainder, digits, length):
    """
    Count the reversible numbers of exactly ``length`` digits, filling ``digits``
    in from the outside pair inward.

    A number n is reversible when n + reverse(n) consists entirely of odd
    digits; ``remainder`` carries the running pairwise digit sums.

    NOTE(review): the original had both constants bound to the same mangled
    name ``A__``, duplicate ``snake_case`` parameters (a SyntaxError), and every
    assignment target mangled while the code read ``digits``/``result``/
    ``EVEN_DIGITS``/``ODD_DIGITS`` — names restored from those read sites.
    """
    if remaining_length == 0:
        # All digit positions filled: reject leading/trailing zero, then verify
        # every pairwise sum is odd, propagating the carry inward.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself, so the pending
        # carry must already be odd for the middle sum digit to be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        # Place the right digit of the current outer pair.
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The pair sum (plus carry) must be odd, so the partner digit has the
        # opposite parity of remainder + digit1.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,
            )
    return result
def solution(max_power=9):
    """
    Count the reversible numbers below 10**max_power (Project Euler 145).

    NOTE(review): restored the real name — the ``__main__`` guard below calls
    ``solution()`` while the definition had been mangled to ``_UpperCAmelCase``,
    and the accumulator was assigned to a mangled name while ``result`` was read.
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 82 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to ``max_length``.

    NOTE(review): original signature had six duplicated ``lowerCAmelCase``
    parameters (SyntaxError) and ``A__`` placeholders; names restored from the
    call site ``encode_line(source_tokenizer, source_line, self.max_source_length, "right")``
    in ``__getitem__`` below.
    """
    # BART needs add_prefix_space unless the line already starts with a space.
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(""" """) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="""max_length""" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns of ``input_ids`` (and ``attention_mask``) that contain
    only ``pad_token_id``, so batches are no wider than their longest row.

    NOTE(review): name restored from the ``trim_batch(...)`` call sites in
    ``collate_fn`` below; the mangled version bound the keep-mask to a throwaway
    name and had duplicate parameter names.
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """Line-aligned source/target dataset read lazily via ``linecache``.

    NOTE(review): class/base names and every attribute assignment were mangled;
    attribute names restored from the ``self.*`` read sites in the methods
    themselves (``self.src_file``, ``self.src_lens``, ``self.tokenizer`` ...).
    ``TaTokenizer`` is kept as imported at the top of this file — presumably a
    mangling of ``T5Tokenizer``; confirm against the import.
    """

    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + """.source""")
        self.tgt_file = Path(data_dir).joinpath(type_path + """.target""")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Truncate to the first n_obs examples.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        """Read one (source, target) pair and tokenize it."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("""\n""")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("""\n""")
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; RAG uses separate encoders.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, """right""")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, """right""")

        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Length (in characters, incl. newline handling) of each line of the file."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim shared padding columns."""
        input_ids = torch.stack([x["""input_ids"""] for x in batch])
        masks = torch.stack([x["""attention_mask"""] for x in batch])
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
# Module logger; restored to the name `logger` which set_extra_model_params reads.
logger = getLogger(__name__)


def flatten_list(summary_ids):
    """Flatten a list of lists into a single list."""
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path):
    """Save git information of the current repo to ``folder_path/git_log.json``."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, """git_log.json"""))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize ``content`` as JSON to ``path``.

    NOTE(review): name restored from the ``save_json(...)`` call in
    ``save_git_info`` above; mangled parameters made the body unrunnable.
    """
    with open(path, """w""") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Load and return the JSON document stored at ``path``."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return a dict describing the enclosing git repo (id, sha, branch, host).

    NOTE(review): name restored from the ``get_git_info()`` call in
    ``save_git_info``; requires the third-party ``git`` (GitPython) import above.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
        """hostname""": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f, x):
    """list(map(f, x)) — eager map returning a list."""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle ``obj`` to ``path`` (binary mode)."""
    with open(path, """wb""") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace
    (the standard SQuAD answer normalization).

    NOTE(review): name restored from the ``normalize_answer(...)`` calls in
    ``f1_score`` / ``exact_match_score`` below.
    """

    def remove_articles(text):
        return re.sub(R"""\b(a|an|the)\b""", """ """, text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    # Order matters: lowercase, strip punctuation, drop articles, collapse spaces.
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and ground truth strings."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts shared tokens with multiplicity.
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth):
    """True when prediction and ground truth are equal after normalization."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    """Mean exact-match score over two aligned lists of strings."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    """True when the model type string denotes a RAG model."""
    return model_prefix.startswith("""rag""")
def set_extra_model_params(extra_params, hparams, config):
    """Move each hparam in ``extra_params`` onto ``config`` (under an
    equivalent name if needed), deleting it from ``hparams``.

    Returns the (mutated) ``(hparams, config)`` pair.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                # Config accepts neither spelling: drop the hparam with a notice.
                logger.info("""config doesn't have a `{}` attribute""".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 357 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a single flat string feature.

    NOTE(review): class and method names restored — the test case below
    instantiates ``DummyBeamDataset`` and the builder framework requires the
    ``_info`` / ``_split_generators`` / ``_build_pcollection`` hook names.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""")}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""examples""": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested sequence feature.

    NOTE(review): names restored as for ``DummyBeamDataset`` above
    (instantiated by the nested-features test below).
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""")})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""examples""": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Three flat (index, {"content": str}) fixture examples."""
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
def get_test_nested_examples():
    """Three nested (index, {"a": {"b": [str]}}) fixture examples."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
class BeamBuilderTest(TestCase):
    """End-to-end tests for Beam-based builders using the local DirectRunner.

    NOTE(review): class/base/method names and local names restored — the
    originals were mangled (e.g. ``expected_num_examples``/``builder``/``dset``
    were read but never bound).
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", F"""{builder.name}-train.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({"""content""": datasets.Value("""string""")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            self.assertDictEqual(dset["""train"""][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            with patch("""apache_beam.io.parquetio.WriteToParquet""") as write_parquet_mock:
                # Force two shards so the sharded file layout is exercised.
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, """default""", """0.0.0""", F"""{builder.name}-train-00000-of-00002.arrow""")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, """default""", """0.0.0""", F"""{builder.name}-train-00000-of-00002.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({"""content""": datasets.Value("""string""")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""]), sorted(["""foo""", """bar""", """foobar"""]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner, preparation must refuse to run.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", F"""{builder.name}-train.arrow""")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            self.assertDictEqual(dset["""train"""][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset
| 166 | 0 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over the mapped dataset, print them, and write results
    (plus optional per-example logs) next to the working directory.

    NOTE(review): name restored from the ``log_results(result, args)`` call in
    ``main`` below; locals restored from their read sites.
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)

    with open(f"""{dataset_id}_eval_results.txt""", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"""{i}""" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"""{i}""" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lowercase, strip ignored punctuation, and collapse the listed whitespace
    token sequences — must mirror the normalization used during training.

    NOTE(review): name restored from the ``normalize_text(batch["sentence"])``
    call in ``main``.
    """
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", " ", " "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    """Run ASR inference over the requested dataset split and log WER/CER."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    # NOTE(review): the parser/args bindings were mangled (assigned to
    # `lowerCAmelCase__` while `parser`/`args` were read); restored.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
| 108 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as ``mm-dd-yyyy`` or
    ``mm/dd/yyyy``, computed with Zeller's congruence and cross-checked
    against ``datetime``.

    NOTE(review): the definition name and every local binding were mangled
    (all assigned to ``__UpperCamelCase`` while ``days``/``m``/``c``/... were
    read); names restored from the read sites and the ``zeller(args.date_input)``
    call in the ``__main__`` guard.
    """
    # Days of the week, keyed by the Zeller result f.
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # Map datetime.weekday() (Mon=0..Sun=6) onto Zeller's numbering (Sun=0).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math: January/February are treated as months 13/14 of the prior year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math against datetime's own calendar.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = F'Your date {date_input}, is a {days[str(f)]}!'
    return response
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): parser/args bindings were mangled (assigned to `lowercase`
    # while `parser` was read), and a dataset artifact trailing the last line
    # was removed.
    parser = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    args = parser.parse_args()
    zeller(args.date_input)
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    """Copy generator weights from the original checkpoint dict into the
    HF HiFi-GAN model (weight-norm params are copied while the norm is applied).

    NOTE(review): every assignment target was mangled away; the HF attribute
    paths (conv_pre / upsampler / resblocks / conv_post) are reconstructed from
    the SpeechT5 HiFi-GAN conversion layout — confirm against the model class.
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']

    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    """Convert an original HiFi-GAN vocoder checkpoint into the HF format and
    optionally push it to the hub.

    NOTE(review): name restored from the call in the ``__main__`` guard; locals
    restored from their read sites. ``SpeechTaHifiGan`` is kept as imported at
    the top of this file — presumably a mangling of ``SpeechT5HifiGan``.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["""model"""]["""generator"""], model, config)

    # The stats file holds the mel-spectrogram mean/scale used for denormalization.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("""Pushing to the hub...""")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # NOTE(review): parser/args bindings were mangled (assigned to
    # `UpperCAmelCase_` while `parser` was read); restored.
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 61 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for GPTSanJapaneseTokenizer."""

    # NOTE(review): the original bound all three class attributes to the same
    # mangled name; attribute names restored from the TokenizerTesterMixin
    # contract (tokenizer_class / test_rust_tokenizer / from_pretrained_kwargs).
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"""do_clean_text""": False, """add_prefix_space""": False}
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCAmelCase__ = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
UpperCAmelCase__ = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
UpperCAmelCase__ = {"""unk_token""": """<unk>"""}
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **_UpperCAmelCase : Optional[int] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Any ):
"""simple docstring"""
UpperCAmelCase__ = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
UpperCAmelCase__ = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ = """こんにちは、世界。 こんばんは、㔺界。"""
UpperCAmelCase__ = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
UpperCAmelCase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
UpperCAmelCase__ = tokens + [tokenizer.unk_token]
UpperCAmelCase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
UpperCAmelCase__ = """こんにちは、、、、世界。こんばんは、、、、世界。"""
UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """Prefix-text and plain encodings of the same string must decode identically.

        NOTE(review): obfuscated — ``tokenizer``, ``prefix_text``, ``input_text``
        and the ``_UpperCAmelCase`` arguments are unresolved in this scope
        (all assignments target ``UpperCAmelCase__``).
        """
        UpperCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        UpperCAmelCase__ = """こんにちは、世界。"""
        UpperCAmelCase__ = """こんばんは、㔺界。😀"""
        UpperCAmelCase__ = """こんにちは、世界。こんばんは、世界。😀"""
        UpperCAmelCase__ = tokenizer.encode(prefix_text + input_text )
        UpperCAmelCase__ = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , prefix_text=_UpperCAmelCase )
        UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
        UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
        UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        """token_type_ids must mark the prefix segment identically across input styles.

        NOTE(review): obfuscated — ``tokenizer``, ``prefix_text``, ``input_text``,
        ``len_prefix``, ``len_text`` and the ``_UpperCAmelCase`` arguments are
        unresolved in this scope (all assignments target ``UpperCAmelCase__``).
        """
        UpperCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        UpperCAmelCase__ = """こんにちは、世界。"""
        UpperCAmelCase__ = """こんばんは、㔺界。😀"""
        UpperCAmelCase__ = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
        UpperCAmelCase__ = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
        UpperCAmelCase__ = [1] + [0] * (len_prefix + len_text + 1)
        UpperCAmelCase__ = [1] * (len_prefix + len_text + 1) + [0]
        UpperCAmelCase__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        UpperCAmelCase__ = tokenizer(prefix_text + input_text ).token_type_ids
        UpperCAmelCase__ = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        UpperCAmelCase__ = tokenizer(_UpperCAmelCase , prefix_text=_UpperCAmelCase ).token_type_ids
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """Prefix-style encodings decode identically and insert a SEG token.

        NOTE(review): obfuscated — ``tokenizer``, ``x_token_a`` and the
        ``_UpperCAmelCase`` arguments are unresolved in this scope
        (all assignments target ``UpperCAmelCase__``).
        """
        UpperCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        UpperCAmelCase__ = tokenizer.encode("""あンいワ""" )
        UpperCAmelCase__ = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        UpperCAmelCase__ = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
        self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
        self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        """Batch encoding pads input_ids/token_type_ids/attention_mask as expected.

        NOTE(review): obfuscated — ``x_token``/``x_token_a`` and the
        ``_UpperCAmelCase`` arguments are unresolved in this scope
        (all assignments target ``UpperCAmelCase__``).
        """
        UpperCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        UpperCAmelCase__ = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        UpperCAmelCase__ = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase )
        UpperCAmelCase__ = tokenizer.batch_encode_plus(_UpperCAmelCase , padding=_UpperCAmelCase )
        # fmt: off
        UpperCAmelCase__ = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
        UpperCAmelCase__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        UpperCAmelCase__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , _UpperCAmelCase )
        self.assertListEqual(x_token.token_type_ids , _UpperCAmelCase )
        self.assertListEqual(x_token.attention_mask , _UpperCAmelCase )
        self.assertListEqual(x_token_a.input_ids , _UpperCAmelCase )
        self.assertListEqual(x_token_a.token_type_ids , _UpperCAmelCase )
        self.assertListEqual(x_token_a.attention_mask , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self : Dict ):
        """Common-mixin test intentionally skipped for this tokenizer."""
        pass
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        """Common-mixin test intentionally skipped for this tokenizer."""
        pass
| 61 | 1 |
'''simple docstring'''
import os
import string
import sys
# NOTE(review): obfuscated — the constant names were mangled to ``UpperCAmelCase``,
# so ``ARROW_KEY_FLAG``/``KEYMAP``/``WIN_KEYMAP`` read below are never actually
# bound; restore the original names for this module to import.
UpperCAmelCase : Union[str, Any] = 1 << 8
UpperCAmelCase : Any = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 2_7,
    'up': 6_5 + ARROW_KEY_FLAG,
    'down': 6_6 + ARROW_KEY_FLAG,
    'right': 6_7 + ARROW_KEY_FLAG,
    'left': 6_8 + ARROW_KEY_FLAG,
    'mod_int': 9_1,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 5_0,
    'delete': 5_1,
    'pg_up': 5_3,
    'pg_down': 5_4,
}
# NOTE(review): these two were presumably KEYMAP["arrow_begin"]/KEYMAP["arrow_end"];
# the key names were lost in mangling, so get_character's lookups will KeyError.
UpperCAmelCase : Optional[Any] = KEYMAP['up']
UpperCAmelCase : Optional[int] = KEYMAP['left']
if sys.platform == "win32":
    UpperCAmelCase : Union[str, Any] = []
    UpperCAmelCase : Dict = {
        b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }
# NOTE(review): this loop's assignment target was mangled; it originally
# registered the digit keys into the key map (e.g. KEYMAP[str(i)] = ord(str(i))).
for i in range(1_0):
    UpperCAmelCase : Tuple = ord(str(i))
def a__ ( ):
    """Read one raw keypress from stdin (Windows via msvcrt, POSIX via termios).

    NOTE(review): obfuscated — locals were renamed to ``__SCREAMING_SNAKE_CASE``
    and several reads (``a__``, ``cha``, ``ch``) no longer resolve; e.g.
    ``len(a__ )`` below measures the function object itself rather than the
    Windows char buffer.
    """
    if os.name == "nt":
        import msvcrt
        __SCREAMING_SNAKE_CASE = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(a__ ) == 0:
            # Read the keystroke
            __SCREAMING_SNAKE_CASE = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                __SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    __SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(a__ )
                    if ord(a__ ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_26 ) )
                    __SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
                except KeyError:
                    __SCREAMING_SNAKE_CASE = cha[1]
            else:
                __SCREAMING_SNAKE_CASE = ch.decode(a__ )
        else:
            __SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        # Put the terminal into raw mode for a single read, then restore it.
        __SCREAMING_SNAKE_CASE = sys.stdin.fileno()
        __SCREAMING_SNAKE_CASE = termios.tcgetattr(a__ )
        try:
            tty.setraw(a__ )
            __SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(a__ , termios.TCSADRAIN , a__ )
    return ch
def a__ ( ):
    """Translate a raw keypress (incl. escape sequences) into a char or KEYMAP code.

    NOTE(review): obfuscated — this def shadows the raw reader above (both are
    named ``a__``), so ``get_raw_chars()``/``char`` are unresolved, and the
    ``arrow_begin``/``arrow_end`` KEYMAP entries it looks up were never
    registered (their assignments lost the key names).
    """
    __SCREAMING_SNAKE_CASE = get_raw_chars()
    if ord(a__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(a__ ) == KEYMAP["esc"]:
        __SCREAMING_SNAKE_CASE = get_raw_chars()
        if ord(a__ ) == KEYMAP["mod_int"]:
            __SCREAMING_SNAKE_CASE = get_raw_chars()
            if ord(a__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(a__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(a__ ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 267 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
    """Round-trip and integration tests for CLIPProcessor (tokenizer + image processor).

    NOTE(review): obfuscated — setUp's assignments all target the local
    ``__SCREAMING_SNAKE_CASE``, so ``self.tmpdirname`` / ``self.vocab_file`` /
    ``self.merges_file`` / ``self.image_processor_file`` are read throughout
    but never actually set, and most other locals are likewise unresolved.
    """
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
        """Write a tiny BPE vocab/merges and an image-processor config to a temp dir."""
        __SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
        # fmt: off
        __SCREAMING_SNAKE_CASE = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        __SCREAMING_SNAKE_CASE = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
        __SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        __SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
        __SCREAMING_SNAKE_CASE = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48145466, 0.4578275, 0.40821073],
            """image_std""": [0.26862954, 0.26130258, 0.27577711],
        }
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__ ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
        """Load the slow CLIP tokenizer from the fixture dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__ ( self : Tuple , **__SCREAMING_SNAKE_CASE : Any ) -> int:
        """Load the fast (Rust) CLIP tokenizer from the fixture dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__ ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
        """Load the CLIP image processor from the fixture dir."""
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__ ( self : Tuple ) -> str:
        """Remove the fixture directory."""
        shutil.rmtree(self.tmpdirname )
    def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
        """Create one random PIL image fixture."""
        __SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]  # NOTE(review): np.uinta is an unresolved mangling of np.uint8
        __SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
        """Processor saved with slow/fast tokenizer reloads with equivalent components."""
        __SCREAMING_SNAKE_CASE = self.get_tokenizer()
        __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
        __SCREAMING_SNAKE_CASE = self.get_image_processor()
        __SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        processor_slow.save_pretrained(self.tmpdirname )
        __SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        processor_fast.save_pretrained(self.tmpdirname )
        __SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE )
        self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE )
        self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
        """from_pretrained forwards extra kwargs to tokenizer and image processor."""
        __SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        __SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
        __SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__ ( self : List[str] ) -> Dict:
        """Processor image path matches calling the image processor directly."""
        __SCREAMING_SNAKE_CASE = self.get_image_processor()
        __SCREAMING_SNAKE_CASE = self.get_tokenizer()
        __SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
        __SCREAMING_SNAKE_CASE = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        __SCREAMING_SNAKE_CASE = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def UpperCAmelCase__ ( self : List[Any] ) -> int:
        """Processor text path matches calling the tokenizer directly."""
        __SCREAMING_SNAKE_CASE = self.get_image_processor()
        __SCREAMING_SNAKE_CASE = self.get_tokenizer()
        __SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = """lower newer"""
        __SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
        """Joint text+image call returns the expected keys; empty call raises."""
        __SCREAMING_SNAKE_CASE = self.get_image_processor()
        __SCREAMING_SNAKE_CASE = self.get_tokenizer()
        __SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = """lower newer"""
        __SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
        __SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(__SCREAMING_SNAKE_CASE ):
            processor()
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        """batch_decode is forwarded to the underlying tokenizer."""
        __SCREAMING_SNAKE_CASE = self.get_image_processor()
        __SCREAMING_SNAKE_CASE = self.get_tokenizer()
        __SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __SCREAMING_SNAKE_CASE = processor.batch_decode(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__ ( self : int ) -> str:
        """Output keys equal processor.model_input_names."""
        __SCREAMING_SNAKE_CASE = self.get_image_processor()
        __SCREAMING_SNAKE_CASE = self.get_tokenizer()
        __SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = """lower newer"""
        __SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
        __SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 267 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
a : Dict = '''2020.9.26'''
a : Tuple = '''xcodz-dot, cclaus, dhruvmanila'''
def lowercase__(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point to its 2D perspective projection.

    The point ``(x, y, z)`` is projected onto a viewing plane ``distance``
    units from the observer and the result is multiplied by ``scale``.

    Fixes: the obfuscated version declared five parameters all named ``A``
    (a SyntaxError) and never bound ``projected_x``/``projected_y``.

    Raises:
        TypeError: if any argument is not a float or an int.
    """
    # locals() here contains exactly the five parameters: validate them all.
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def lowercase__(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate the 3D point ``(x, y, z)`` about ``axis`` ('x', 'y' or 'z') by ``angle``.

    The angle is reduced modulo 360 and converted with this module's
    historical ``/ 450 * 180 / pi`` factor before the standard 2D rotation
    formulas are applied in the plane perpendicular to ``axis``.

    Fixes: the obfuscated version declared five parameters all named ``A``
    (a SyntaxError) and never bound the ``new_x``/``new_y``/``new_z`` results.

    Raises:
        TypeError: if ``axis`` is not a str, or a coordinate/angle is not numeric.
        ValueError: if ``axis`` is not one of 'x', 'y', 'z'.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Historical angle conversion kept for backward compatibility.
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): obfuscated — both functions above were renamed to
    # ``lowercase__``, so ``convert_to_ad`` and ``rotate`` are unresolved here
    # and these demo lines raise NameError; restore the original names.
    print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 358 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowercase__(A ) ->Dict:
    """Load a T5X checkpoint from path ``A`` and return its params flattened.

    NOTE(review): obfuscated — both results are bound to ``lowercase__``, so
    ``flatten_dict`` re-receives the path argument and the returned
    ``flax_params`` name is unresolved.
    """
    lowercase__ : Dict= checkpoints.load_tax_checkpoint(A )
    lowercase__ : List[Any]= flatten_dict(A )
    return flax_params
def lowercase__(A ) ->Tuple:
    """Rename flattened T5X param keys to HF Pix2Struct names, convert to torch.

    NOTE(review): obfuscated — ``CONVERSION_MAPPING``/``DECODER_CONVERSION_MAPPING``
    (the two dicts below), ``flax_dict``, ``new_key``, ``converted_dict`` and
    ``converted_torch_dict`` are all read but never bound, because every
    assignment targets ``lowercase__``.
    """
    lowercase__ : Union[str, Any]= {}
    lowercase__ : Any= {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    lowercase__ : Tuple= {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            lowercase__ : Any= ".".join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                lowercase__ : List[Any]= new_key.replace(A , A )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    lowercase__ : List[str]= new_key.replace(A , A )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                lowercase__ : List[str]= re.sub(R"layers_(\d+)" , R"layer.\1" , A )
                lowercase__ : Any= new_key.replace("encoder" , "encoder.encoder" )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                lowercase__ : str= re.sub(R"layers_(\d+)" , R"layer.\1" , A )
            lowercase__ : Dict= flax_dict[key]
    lowercase__ : str= {}
    # convert converted_dict into torch format
    # (embedding weights are kept as-is; all other matrices are transposed)
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            lowercase__ : Dict= torch.from_numpy(converted_dict[key].T )
        else:
            lowercase__ : Any= torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def lowercase__(A , A , A=False , A=False ) ->Union[str, Any]:
    """Convert a T5X Pix2Struct checkpoint and save HF model + processor to disk.

    NOTE(review): obfuscated — the duplicate parameter names ``A`` make this
    ``def`` a SyntaxError, and ``get_flax_param``/``use_large``/
    ``rename_and_convert_flax_params``/``encoder_config``/``decoder_config``/
    ``model``/``processor`` are read but never bound in this scope.
    """
    lowercase__ : Tuple= get_flax_param(A )
    if not use_large:
        lowercase__ : int= PixaStructVisionConfig()
        lowercase__ : str= PixaStructTextConfig()
    else:
        lowercase__ : List[Any]= PixaStructVisionConfig(
            hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 )
        lowercase__ : Optional[Any]= PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 )
    lowercase__ : Any= PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=A )
    lowercase__ : Optional[Any]= PixaStructForConditionalGeneration(A )
    lowercase__ : Tuple= rename_and_convert_flax_params(A )
    model.load_state_dict(A )
    lowercase__ : List[str]= AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
    lowercase__ : List[str]= PixaStructImageProcessor()
    lowercase__ : Dict= PixaStructProcessor(image_processor=A , tokenizer=A )
    if use_large:
        lowercase__ : Tuple= 4_096
        lowercase__ : List[Any]= True
    # mkdir if needed
    os.makedirs(A , exist_ok=A )
    model.save_pretrained(A )
    processor.save_pretrained(A )
    print("Model saved in {}".format(A ) )
if __name__ == "__main__":
    # NOTE(review): obfuscated — the parser is bound to ``a`` but read as
    # ``parser``; ``args.tax_checkpoint_path`` does not match the declared
    # ``--t5x_checkpoint_path`` option (attribute is ``t5x_checkpoint_path``);
    # ``--is_vqa`` is parsed but never forwarded; and the conversion entry
    # point below is unresolved (the function above is named ``lowercase__``).
    a : int = argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
    a : Optional[int] = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 150 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __lowerCamelCase ( __a : int ) -> int: # picklable for multiprocessing
    """Return its argument plus one.

    Defined at module level (not as a lambda/closure) so it stays picklable
    and can be shipped to worker processes via map_nested below.

    Fix: the obfuscated body returned ``i + 1`` while the parameter is
    ``__a``, so every call raised NameError; the original annotations
    (``Optional[Any]``/``-> str``) were also unresolved/incorrect.
    """
    return __a + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __lowerCamelCase ( ) -> Dict:
    """Spark registers as the active joblib backend; unsupported names raise.

    NOTE(review): obfuscated — ``__a`` (the expected exception type, likely
    ValueError) and the ``map_nested`` arguments are unresolved here, the
    fixture list is bound to ``A__`` but never read back, and this def
    shadows the ``+1`` helper above (both renamed to ``__lowerCamelCase``).
    """
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"
    A__ = [1, 2, 3]
    with pytest.raises(__a ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(__a , __a , num_proc=2 )
    with pytest.raises(__a ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(__a , __a , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def __lowerCamelCase ( __a :str ) -> Tuple:
    """map_nested over lists/dicts/nested dicts under the spark backend.

    NOTE(review): obfuscated — every fixture and expected value is bound to
    ``A__`` (``expected_map_nested_sa`` is unresolved), and the
    ``map_nested(__a , __a , num_proc=__a )`` calls pass the parametrized
    argument in place of the intended function/data/num_proc triples.
    """
    A__ = [1, 2]
    A__ = {"""a""": 1, """b""": 2}
    A__ = {"""a""": [1, 2], """b""": [3, 4]}
    A__ = {"""a""": {"""1""": 1}, """b""": 2}
    A__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    A__ = [2, 3]
    A__ = {"""a""": 2, """b""": 3}
    A__ = {"""a""": [2, 3], """b""": [4, 5]}
    A__ = {"""a""": {"""1""": 2}, """b""": 3}
    A__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("""spark""" ):
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
        assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
| 274 |
class A (SCREAMING_SNAKE_CASE ):
    """Raised when a queue exceeds its capacity (OverFlowError in the original).

    NOTE(review): obfuscated — the base ``SCREAMING_SNAKE_CASE`` (presumably
    ``Exception``) is unresolved, and the class name was mangled to ``A``, so
    the ``raise OverFlowError(...)`` sites below cannot find it.
    """
    pass
class A (SCREAMING_SNAKE_CASE ):
    """Raised when dequeuing from an empty queue (UnderFlowError in the original).

    NOTE(review): obfuscated — the base ``SCREAMING_SNAKE_CASE`` (presumably
    ``Exception``) is unresolved, and the class name was mangled to ``A``, so
    the ``raise UnderFlowError(...)`` sites below cannot find it.
    """
    pass
class A :
    """Fixed-priority queue: three FIFO lanes, priority 0 served first.

    NOTE(review): obfuscated — the two methods below are both named ``a_`` so
    the dequeue definition shadows enqueue; ``self.queues`` is never assigned
    (``__init__`` binds the local ``A__``); ``priority`` and ``UnderFlowError``
    are likewise unresolved.
    """
    def __init__( self : List[Any] ) -> str:
        """Create one empty FIFO list per priority level (0, 1, 2)."""
        A__ = [
            [],
            [],
            [],
        ]
    def a_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
        """Enqueue data at the given priority; each lane holds at most 100 items."""
        try:
            if len(self.queues[priority] ) >= 1_00:
                raise OverflowError("""Maximum queue size is 100""" )
            self.queues[priority].append(__lowerCAmelCase )
        except IndexError:
            # An out-of-range priority indexes past the three lanes.
            raise ValueError("""Valid priorities are 0, 1, and 2""" )
    def a_ ( self : Optional[Any] ) -> int:
        """Pop from the highest-priority non-empty lane; raise when all are empty."""
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError("""All queues are empty""" )
    def __str__( self : Tuple ) -> str:
        """Render one line per priority lane."""
        return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class A :
    """Priority queue over bare elements: dequeue returns the minimum value.

    NOTE(review): obfuscated — both methods are named ``a_`` (dequeue shadows
    enqueue); ``self.queue`` is never assigned (``__init__`` binds ``A__``);
    ``data``, ``OverFlowError`` and ``UnderFlowError`` are unresolved; and the
    ``remove`` call passes the parameter rather than the computed minimum.
    """
    def __init__( self : int ) -> str:
        """Start with an empty backing list."""
        A__ = []
    def a_ ( self : int , __lowerCAmelCase : int ) -> None:
        """Append data; capacity is capped at 100 items."""
        if len(self.queue ) == 1_00:
            raise OverFlowError("""Maximum queue size is 100""" )
        self.queue.append(__lowerCAmelCase )
    def a_ ( self : List[str] ) -> int:
        """Remove and return the smallest element (O(n) scan)."""
        if not self.queue:
            raise UnderFlowError("""The queue is empty""" )
        else:
            A__ = min(self.queue )
            self.queue.remove(__lowerCAmelCase )
            return data
    def __str__( self : List[Any] ) -> str:
        """Render the backing list."""
        return str(self.queue )
def __lowerCamelCase ( ) -> Optional[Any]:
    """Demo run for the fixed-priority queue.

    NOTE(review): obfuscated — ``FixedPriorityQueue``/``fpq``/``__a`` are
    unresolved here (the class above is named ``A`` and the instance was
    bound to ``A__``).
    """
    A__ = FixedPriorityQueue()
    fpq.enqueue(0 , 1_0 )
    fpq.enqueue(1 , 7_0 )
    fpq.enqueue(0 , 1_0_0 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 6_4 )
    fpq.enqueue(0 , 1_2_8 )
    print(__a )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(__a )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def __lowerCamelCase ( ) -> int:
    """Demo run for the element (minimum-first) priority queue.

    NOTE(review): obfuscated — ``ElementPriorityQueue``/``epq``/``__a`` are
    unresolved here (the class above is named ``A`` and the instance was
    bound to ``A__``); this def also shadows the demo above.
    """
    A__ = ElementPriorityQueue()
    epq.enqueue(1_0 )
    epq.enqueue(7_0 )
    epq.enqueue(1_0_0 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(6_4 )
    epq.enqueue(1_2_8 )
    print(__a )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(__a )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
    # NOTE(review): obfuscated — both demo functions above were renamed to
    # ``__lowerCamelCase``, so these calls raise NameError; restore the names.
    fixed_priority_queue()
    element_priority_queue()
| 274 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Force deterministic torch ops so pipeline outputs are reproducible across runs.
enable_full_determinism()
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast tests for StableUnCLIPImg2ImgPipeline built from tiny components.

    NOTE(review): obfuscated — the three mixin bases were renamed to the
    unresolved ``__SCREAMING_SNAKE_CASE``; the five class attributes all bind
    ``snake_case_`` (each shadowing the previous); and most locals
    (``embedder_hidden_size``, ``embedder_projection_dim``, the component
    names in get_dummy_components) are read but never bound because the
    assignments target ``__A``.
    """
    snake_case_ = StableUnCLIPImgaImgPipeline
    snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case_ = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    snake_case_ = frozenset([] )
    def UpperCamelCase_ ( self : List[Any] ):
        """Build the dict of tiny model components used by the fast tests."""
        __A = 32
        __A = embedder_hidden_size
        # image encoding components
        __A = CLIPImageProcessor(crop_size=32 ,size=32 )
        torch.manual_seed(0 )
        __A = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=A ,projection_dim=A ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=32 ,intermediate_size=37 ,patch_size=1 ,) )
        # regular denoising components
        torch.manual_seed(0 )
        __A = StableUnCLIPImageNormalizer(embedding_dim=A )
        __A = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        __A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        __A = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 ,eos_token_id=2 ,hidden_size=A ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) )
        torch.manual_seed(0 )
        __A = UNetaDConditionModel(
            sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") ,up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type="projection" ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=A ,layers_per_block=1 ,upcast_attention=A ,use_linear_projection=A ,)
        torch.manual_seed(0 )
        __A = DDIMScheduler(
            beta_schedule="scaled_linear" ,beta_start=0.0_00_85 ,beta_end=0.0_12 ,prediction_type="v_prediction" ,set_alpha_to_one=A ,steps_offset=1 ,)
        torch.manual_seed(0 )
        __A = AutoencoderKL()
        __A = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def UpperCamelCase_ ( self : str ,A : Optional[int] ,A : List[Any]=0 ,A : Any=True ):
        """Build deterministic pipeline inputs.

        NOTE(review): the duplicate ``A`` parameter names make this ``def``
        a SyntaxError, and ``pil_image``/``input_image``/``generator`` are
        unresolved (assignments target ``__A``).
        """
        if str(A ).startswith("mps" ):
            __A = torch.manual_seed(A )
        else:
            __A = torch.Generator(device=A ).manual_seed(A )
        __A = floats_tensor((1, 3, 32, 32) ,rng=random.Random(A ) ).to(A )
        if pil_image:
            __A = input_image * 0.5 + 0.5
            __A = input_image.clamp(0 ,1 )
            __A = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
            __A = DiffusionPipeline.numpy_to_pil(A )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def UpperCamelCase_ ( self : Any ):
        """CPU forward pass matches the recorded image slice."""
        __A = "cpu" # ensure determinism for the device-dependent torch.Generator
        __A = self.get_dummy_components()
        __A = StableUnCLIPImgaImgPipeline(**A )
        __A = sd_pipe.to(A )
        sd_pipe.set_progress_bar_config(disable=A )
        __A = self.get_dummy_inputs(A )
        inputs.update({"image_embeds": None} )
        __A = sd_pipe(**A ).images
        __A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __A = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def UpperCamelCase_ ( self : List[Any] ):
        """Attention slicing keeps outputs close (exact match only on cpu/mps)."""
        __A = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=A )
    def UpperCamelCase_ ( self : Dict ):
        """Single-sample and batched inference agree."""
        __A = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=A )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
    def UpperCamelCase_ ( self : Optional[int] ):
        """xFormers attention matches the default attention path."""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=A )
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests against released StableUnCLIP img2img checkpoints.

    NOTE(review): obfuscated — assignments bind ``__A`` while later lines read
    the original names (``pipe``, ``image``, ``inputs``, ``mem_bytes`` …), and
    the ``A`` arguments passed around are unresolved; this class also shadows
    the fast-test class above (both were renamed to ``UpperCAmelCase``).
    """
    def UpperCamelCase_ ( self : str ):
        """Free GPU memory between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase_ ( self : Dict ):
        """l-variant img2img output matches the stored reference array."""
        __A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        __A = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        __A = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" ,torch_dtype=torch.floataa )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __A = torch.Generator(device="cpu" ).manual_seed(0 )
        __A = pipe(A ,"anime turle" ,generator=A ,output_type="np" )
        __A = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(A ,A )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        """h-variant img2img output matches the stored reference array."""
        __A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        __A = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        __A = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" ,torch_dtype=torch.floataa )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __A = torch.Generator(device="cpu" ).manual_seed(0 )
        __A = pipe(A ,"anime turle" ,generator=A ,output_type="np" )
        __A = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(A ,A )
    def UpperCamelCase_ ( self : Dict ):
        """Sequential CPU offload keeps peak GPU memory under 7 GB."""
        __A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __A = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" ,torch_dtype=torch.floataa )
        __A = pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __A = pipe(
            A ,"anime turtle" ,num_inference_steps=2 ,output_type="np" ,)
        __A = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 124 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCAmelCase ( TaskTemplate ):
    """Task template describing an automatic-speech-recognition dataset:
    an ``audio`` input column and a string ``transcription`` label column.

    NOTE(review): field and method names restored from the references inside the
    methods (``self.audio_column``, ``self.input_schema`` …); the original text
    assigned every field to the same name ``snake_case_`` and gave both methods
    the same name, which cannot be what was intended.
    """

    task: str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"transcription": Value("string" )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features( self , features ):
        """Return a copy of this template whose input schema uses the dataset's own Audio feature.

        Raises ValueError when the audio column is missing or not an Audio type.
        """
        if self.audio_column not in features:
            raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Frozen dataclass: write through __dict__ to bypass immutability.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping( self ):
        """Map the template's column names to their canonical roles."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 124 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCAmelCase ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Tokenize a single text line to at most ``max_length`` tokens.

    NOTE(review): the original text repeated the same parameter name four times
    (a SyntaxError); names restored from how the body uses them.
    BART needs ``add_prefix_space`` when the line does not already start with one.
    """
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def lowerCAmelCase ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop columns that are pure padding in every row of the batch.

    Returns the trimmed ``input_ids`` (and trimmed ``attention_mask`` when given).
    NOTE(review): parameter names restored — the original text repeated one name.
    """
    # Keep a column if any row holds a non-pad token there.
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class snake_case ( Dataset ):
    """Seq2seq line-pair dataset reading ``<type_path>.source`` / ``<type_path>.target``.

    NOTE(review): attribute, parameter and method names restored from how the
    methods use them (``self.src_file``, ``self.get_char_lens`` …); the original
    text reassigned everything to ``__A``/``_lowerCamelCase``, which cannot run.
    ``encode_line``/``trim_batch`` are the module-level helpers defined above —
    confirm the names, the helpers in this file were also renamed.
    """

    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__( self ):
        return len(self.src_lens )

    def __getitem__( self , index ):
        """Return padded input/attention/decoder-input tensors for one line pair."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right; RAG uses separate encoders.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens( data_file ):
        # Character length of each line; used for truncating to n_obs and sanity checks.
        return [len(x ) for x in Path(data_file ).open().readlines()]

    def collate_fn( self , batch ):
        """Stack a list of samples and trim shared padding columns."""
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
# Module-level logger shared by the helpers below.
lowercase_ = getLogger(__name__)
def lowerCAmelCase ( nested_list ):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return list(itertools.chain.from_iterable(nested_list ) )
def lowerCAmelCase ( folder_path ):
    """Save git repository metadata to ``<folder_path>/git_log.json``.

    NOTE(review): relies on the module-level ``get_git_info``/``save_json``
    helpers defined nearby — confirm their names, they were also renamed.
    """
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ) )
def lowerCAmelCase ( content , path , indent=4 , **json_dump_kwargs ):
    """Serialize ``content`` as JSON to ``path``.

    NOTE(review): parameter names restored — the original text repeated one
    name and dumped the path instead of the content.
    """
    with open(path , '''w''' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def lowerCAmelCase ( path ):
    """Read and return the JSON content of ``path``."""
    with open(path ) as f:
        return json.load(f )
def lowerCAmelCase ( ):
    """Collect git repository metadata (repo id, commit sha, branch, hostname).

    NOTE(review): ``search_parent_directories`` was an undefined name in the
    original text; upstream passes True — confirm.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def lowerCAmelCase ( fn , iterable ):
    """Eager ``map``: apply ``fn`` to every element and return a list."""
    return list(map(fn , iterable ) )
def lowerCAmelCase ( obj , path ):
    """Pickle ``obj`` to ``path`` (binary mode)."""
    with open(path , '''wb''' ) as f:
        return pickle.dump(obj , f )
def lowerCAmelCase ( text ):
    """SQuAD-style answer normalization: lowercase, strip punctuation,
    drop articles (a/an/the), collapse whitespace.
    """
    def remove_articles(s ):
        return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , s )

    def white_space_fix(s ):
        return " ".join(s.split() )

    def remove_punc(s ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in s if ch not in exclude )

    def lower(s ):
        return s.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def lowerCAmelCase ( prediction , ground_truth ):
    """Token-level F1 between normalized prediction and ground truth (SQuAD metric).

    Returns 0 when the normalized token multisets share nothing.
    NOTE(review): relies on the module-level ``normalize_answer`` helper defined
    above — confirm the name, it was also renamed.
    """
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def lowerCAmelCase ( prediction , ground_truth ):
    """Exact-match after SQuAD-style normalization.

    NOTE(review): relies on the module-level ``normalize_answer`` helper defined
    above — confirm the name, it was also renamed.
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def lowerCAmelCase ( output_lns , reference_lns ):
    """Average exact-match over aligned hypothesis/reference line lists.

    Returns ``{"em": score}``; 0 for empty input.
    NOTE(review): relies on the module-level ``exact_match_score`` helper
    defined above — confirm the name, it was also renamed.
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def lowerCAmelCase ( model_prefix ):
    """True when the model-type prefix designates a RAG model."""
    return model_prefix.startswith('''rag''' )
def lowerCAmelCase ( extra_params , hparams , config ):
    """Move each truthy hparam in ``extra_params`` onto ``config``, removing it
    from ``hparams``; hparams the config cannot hold are dropped with a log line.

    Returns the (mutated) ``hparams, config`` pair.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 266 |
"""simple docstring"""
from __future__ import annotations
class snake_case :
'''simple docstring'''
def __init__( self : int, _lowerCamelCase : List[Any]=None ):
'''simple docstring'''
__A = data
__A = None
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
__A = []
__A = self
while temp:
string_rep.append(f'{temp.data}' )
__A = temp.next
return "->".join(_lowerCamelCase )
def lowerCAmelCase ( elements_list ):
    """Build a linked list of ``snake_case`` nodes from a non-empty sequence
    and return its head.

    Raises Exception for an empty sequence.
    """
    if not elements_list:
        raise Exception('''The Elements List is empty''' )
    current = head = snake_case(elements_list[0] )
    for i in range(1 , len(elements_list ) ):
        current.next = snake_case(elements_list[i] )
        current = current.next
    return head
def lowerCAmelCase ( head_node ):
    """Print the list's elements in reverse order via recursion.

    NOTE(review): the original recursed through ``print_reverse``, a name not
    defined in this file; the recursion now uses this function's own name.
    """
    if head_node is not None and isinstance(head_node , snake_case ):
        lowerCAmelCase(head_node.next )
        print(head_node.data )
def lowerCAmelCase ( ):
    """Demo entry point: run doctests, build a sample list, print it forward,
    then print its elements in reverse.

    NOTE(review): `make_linked_list`, `__UpperCamelCase`, `print_reverse` and
    `main` are not defined under these names in this file (the helpers above
    are all named `lowerCAmelCase`) — looks like mechanical renaming damage;
    confirm the intended call targets before running.
    """
    from doctest import testmod

    testmod()
    __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] )
    print('''Linked List:''' )
    print(__UpperCamelCase )
    print('''Elements in Reverse:''' )
    print_reverse(__UpperCamelCase )


if __name__ == "__main__":
    main()
| 266 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds tiny SwinvaConfig/model inputs and runs shape checks for the
    Swinv2 unit tests below.

    NOTE(review): names restored from the call sites in the test class below
    (``SwinvaModelTester(self)``, ``prepare_config_and_inputs`` …); the
    original text annotated every assignment onto the same module-level name
    instead of setting attributes on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when enabled) plus a tiny config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): `path_norm` kwarg kept from the source text — likely a
        # typo for `patch_norm`; confirm against SwinvaConfig's signature.
        return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model output must be (batch, seq_len, dim) after all downsampling stages."""
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Masked-image-modeling logits must reconstruct the full image shape."""
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head must emit one logit per label."""
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Backward-compatible alias for the obfuscated class name.
SCREAMING_SNAKE_CASE__ = SwinvaModelTester
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Unit-test suite for Swinv2 (model + pipeline mixins + unittest.TestCase).

    NOTE(review): the class attributes below are all assigned to the same name
    `a_`, every method is named `_lowercase`, and many call arguments are the
    undefined name `__A` — later definitions shadow earlier ones and unittest
    discovers no `test_*` methods. Looks like mechanical renaming damage;
    compare against the upstream `tests/models/swinv2/test_modeling_swinv2.py`
    before relying on this file.
    """

    # All four flags and both registries are assigned to `a_` — see class note.
    a_ = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    a_ = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    a_ = False
    a_ = False
    a_ = False
    a_ = False

    def _lowercase ( self : int ):
        # Build the shared model tester and a ConfigTester for SwinvaConfig.
        snake_case__ : Tuple = SwinvaModelTester(self )
        snake_case__ : Optional[int] = ConfigTester(self , config_class=__A , embed_dim=3_7 )

    def _lowercase ( self : Dict ):
        # Run the standard battery of config (de)serialization checks.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowercase ( self : List[Any] ):
        # Shape check on the base model.
        snake_case__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A )

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
    def _lowercase ( self : str ):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds" )
    def _lowercase ( self : Optional[Any] ):
        pass

    def _lowercase ( self : List[str] ):
        # Input embeddings must be an nn.Module; output embeddings absent or Linear.
        snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ : Tuple = model_class(__A )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case__ : str = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__A , nn.Linear ) )

    def _lowercase ( self : Optional[Any] ):
        # forward() must accept pixel_values as its first argument.
        snake_case__, snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ : str = model_class(__A )
            snake_case__ : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ : int = [*signature.parameters.keys()]
            snake_case__ : str = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __A )

    def _lowercase ( self : Optional[int] ):
        # Attention tensors: count, window-squared shape, kwarg vs config parity.
        snake_case__, snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : Any = True
        for model_class in self.all_model_classes:
            snake_case__ : int = True
            snake_case__ : str = False
            snake_case__ : List[str] = True
            snake_case__ : Optional[int] = model_class(__A )
            model.to(__A )
            model.eval()
            with torch.no_grad():
                snake_case__ : Union[str, Any] = model(**self._prepare_for_class(__A , __A ) )
            snake_case__ : List[Any] = outputs.attentions
            snake_case__ : Any = len(self.model_tester.depths )
            self.assertEqual(len(__A ) , __A )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            snake_case__ : List[Any] = True
            snake_case__ : Dict = config.window_size**2
            snake_case__ : List[Any] = model_class(__A )
            model.to(__A )
            model.eval()
            with torch.no_grad():
                snake_case__ : str = model(**self._prepare_for_class(__A , __A ) )
            snake_case__ : int = outputs.attentions
            self.assertEqual(len(__A ) , __A )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            snake_case__ : Optional[int] = len(__A )
            # Check attention is always last and order is fine
            snake_case__ : List[Any] = True
            snake_case__ : List[str] = True
            snake_case__ : int = model_class(__A )
            model.to(__A )
            model.eval()
            with torch.no_grad():
                snake_case__ : Dict = model(**self._prepare_for_class(__A , __A ) )
            if hasattr(self.model_tester , "num_hidden_states_types" ):
                snake_case__ : Optional[int] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                snake_case__ : List[str] = 2
            self.assertEqual(out_len + added_hidden_states , len(__A ) )
            snake_case__ : List[Any] = outputs.attentions
            self.assertEqual(len(__A ) , __A )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )

    def _lowercase ( self : Optional[int] , __A : Optional[int] , __A : List[str] , __A : int , __A : Optional[int] ):
        # Helper: check hidden_states and reshaped_hidden_states shapes for one class.
        snake_case__ : Union[str, Any] = model_class(__A )
        model.to(__A )
        model.eval()
        with torch.no_grad():
            snake_case__ : Union[str, Any] = model(**self._prepare_for_class(__A , __A ) )
        snake_case__ : str = outputs.hidden_states
        snake_case__ : Optional[int] = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__A ) , __A )
        # Swinv2 has a different seq_length
        snake_case__ : List[str] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        snake_case__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        snake_case__ : Tuple = outputs.reshaped_hidden_states
        self.assertEqual(len(__A ) , __A )
        snake_case__, snake_case__, snake_case__, snake_case__ : int = reshaped_hidden_states[0].shape
        snake_case__ : Union[str, Any] = (
            reshaped_hidden_states[0].view(__A , __A , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def _lowercase ( self : List[str] ):
        # Hidden states: kwarg path and config path must both work.
        snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            snake_case__ : Any = True
            self.check_hidden_states_output(__A , __A , __A , __A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case__ : List[Any] = True
            self.check_hidden_states_output(__A , __A , __A , __A )

    def _lowercase ( self : Tuple ):
        # Same hidden-states check with an image size padded up to a patch multiple.
        snake_case__, snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : List[Any] = 3
        snake_case__ : Any = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        snake_case__ : Union[str, Any] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        snake_case__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        snake_case__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            snake_case__ : Dict = True
            self.check_hidden_states_output(__A , __A , __A , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case__ : Dict = True
            self.check_hidden_states_output(__A , __A , __A , (padded_height, padded_width) )

    def _lowercase ( self : Optional[Any] ):
        snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__A )

    def _lowercase ( self : List[Any] ):
        snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__A )

    @slow
    def _lowercase ( self : Dict ):
        # Smoke-test loading the first published checkpoint.
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ : Optional[Any] = SwinvaModel.from_pretrained(__A )
            self.assertIsNotNone(__A )

    def _lowercase ( self : Tuple ):
        # With zero-init, every trainable non-embedding parameter mean must be 0 or 1.
        snake_case__, snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : str = _config_zero_init(__A )
        for model_class in self.all_model_classes:
            snake_case__ : List[str] = model_class(config=__A )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run a pretrained Swinv2 classifier on a COCO image.

    NOTE(review): method names restored — the property was referenced as
    ``default_image_processor`` but defined under an obfuscated name, and the
    device argument was the undefined name ``__A``.
    """

    @cached_property
    def default_image_processor(self):
        # Image processor for the checkpoint under test (None when vision extras are absent).
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 286 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
for param in module.parameters():
snake_case__ : Tuple = False
def SCREAMING_SNAKE_CASE ( ):
    """Pick the best available torch device: "mps" > "cuda" > "cpu".

    Prints a warning when MPS is selected (known backprop issues).
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def SCREAMING_SNAKE_CASE ( img ):
    """Display an image with matplotlib, hiding both axes.

    NOTE(review): the original passed a truthy obfuscated name to
    ``set_visible``; hiding the axes (False) is the evident intent.
    """
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def SCREAMING_SNAKE_CASE ( ):
    """Return the current local time as an ``HH:MM:SS`` string."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
| 286 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( model_name ):
    """Build a FocalNetConfig matching a named original checkpoint
    (tiny/small/base/large/xlarge/huge, srf/lrf, fl3/fl4 variants).

    NOTE(review): local names restored — the original text discarded every
    intermediate result. The ``idalabel``/``labelaid`` keyword names follow
    this file's convention; upstream uses ``id2label``/``label2id`` — confirm.
    """
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , idalabel=idalabel , labelaid=labelaid , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def lowercase ( name ):
    """Map an original FocalNet state-dict key to the HF Transformers key.

    Keys that are not part of the classification head are prefixed with
    ``focalnet.``. NOTE(review): the original text bound its parameter to an
    obfuscated name while the body read ``name``.
    """
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
def lowercase ( _snake_case : Tuple , _snake_case : Dict , _snake_case : List[str]=False ) ->Any:
    """Download an original FocalNet checkpoint, convert it to the HF format,
    verify it on a COCO test image, and optionally save / push the result.

    NOTE(review): identifier-scrambled export.  All three parameters share the
    name ``_snake_case`` (duplicate argument names are a SyntaxError) and every
    local is rebound to the throwaway name ``__snake_case``, so the names the
    body reads (``model_name``, ``state_dict``, ``val``, ``model``,
    ``processor``, ``inputs``, ``predicted_class_idx``,
    ``pytorch_dump_folder_path``, ``push_to_hub``, ...) are undefined.  Code is
    kept byte-identical; only documentation is added.
    """
    # Map of supported model variants to their original checkpoint URLs.
    __snake_case : List[Any] = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    # Resolve the checkpoint URL and fetch the raw state dict onto the CPU.
    __snake_case : int = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , _snake_case )
    __snake_case : int = torch.hub.load_state_dict_from_url(_snake_case , map_location='''cpu''' )['''model''']
    # rename keys
    # NOTE(review): the pop/assign pair below no longer renames anything --
    # the popped value and its target are both lost to ``__snake_case``.
    for key in state_dict.copy().keys():
        __snake_case : str = state_dict.pop(_snake_case )
        __snake_case : Tuple = val
    # Build the HF config/model and load the converted weights.
    __snake_case : Any = get_focalnet_config(_snake_case )
    __snake_case : List[Any] = FocalNetForImageClassification(_snake_case )
    model.eval()
    # load state dict
    model.load_state_dict(_snake_case )
    # verify conversion
    # Run the converted model on a COCO image and compare preprocessing
    # against a torchvision transform pipeline.
    __snake_case : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    __snake_case : Any = BitImageProcessor(
        do_resize=_snake_case , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_snake_case , crop_size=224 , do_normalize=_snake_case , image_mean=_snake_case , image_std=_snake_case , )
    __snake_case : List[str] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
    __snake_case : int = processor(images=_snake_case , return_tensors='''pt''' )
    __snake_case : Optional[int] = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    __snake_case : Optional[Any] = image_transforms(_snake_case ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , _snake_case , atol=1e-4 )
    __snake_case : Tuple = model(**_snake_case )
    __snake_case : str = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    # Per-variant expected logit slices used as a conversion sanity check.
    if model_name == "focalnet-tiny":
        __snake_case : Any = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        __snake_case : int = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        __snake_case : Optional[int] = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        __snake_case : List[Any] = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        __snake_case : Union[str, Any] = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        __snake_case : List[str] = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4 )
    print('''Looks ok!''' )
    # Optionally persist the converted artifacts locally and/or on the Hub.
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(_snake_case )
        processor.save_pretrained(_snake_case )
    if push_to_hub:
        print(f"""Pushing model and processor of {model_name} to the hub...""" )
        model.push_to_hub(f"""{model_name}""" )
        processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
    # NOTE(review): `convert_focalnet_checkpoint` is not defined under that
    # name in this file (the conversion routine above was obfuscated to
    # `lowercase`, a name later chunks rebind) -- confirm the intended callee
    # before running this script.
    #
    # Fix: the obfuscated original bound the parser and parsed args to
    # `SCREAMING_SNAKE_CASE` while the following lines read the undefined
    # names `parser` and `args`; bind the names actually used.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""focalnet-tiny""",
        type=str,
        help="""Name of the FocalNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub.""",
    )
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# --- dataset-export artifact (row separator, originally "| 102 |") ---
"""simple docstring"""
import numpy as np
def lowercase(f, ya, xa, h, x_end):
    """Classic fourth-order Runge-Kutta integrator for y' = f(x, y).

    f      -- right-hand side, called as f(x, y)
    ya     -- initial value y(xa)
    xa     -- start of the integration interval
    h      -- step size
    x_end  -- end of the integration interval
    Returns a numpy array y of length n + 1 with y[k] ~= y(xa + k * h).

    NOTE(review): the obfuscated original collapsed all five parameters into
    duplicate ``_snake_case`` names (a SyntaxError) and discarded every
    intermediate slope binding; the parameter order below follows the
    variables the original body actually reads (f, ya, xa, h, x_end).
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])                            # slope at the interval start
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)   # midpoint slope, first estimate
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)   # midpoint slope, refined
        k4 = f(x + h, y[k] + h * k3)               # slope at the interval end
        # Weighted average of the four slopes (Simpson-style 1-2-2-1 weights).
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
# Run the module's embedded doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
# --- dataset-export artifact (row separator, originally "| 102 | 1 |") ---
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module-level logger for this pipeline file.
# NOTE(review): obfuscation gave this constant the name `lowerCamelCase`,
# which the very next module-level assignment rebinds.
lowerCamelCase : int =logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : Optional[Any] ='''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def SCREAMING_SNAKE_CASE(height, width, scale_factor=8):
    """Round (height, width) up to the nearest multiple of `scale_factor`
    after downscaling by `scale_factor` (i.e. divide by scale_factor**2,
    ceil, then multiply back by scale_factor).

    Fixes two obfuscation defects grounded in the original body: the
    parameters were all named ``__lowerCAmelCase`` (duplicate argument names
    are a SyntaxError) and the quotients were bound to ``UpperCamelCase__``
    while the increments and the return read ``new_height``/``new_width``.

    Returns a (new_height, new_width) tuple of ints.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up so the result covers the full input extent.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def SCREAMING_SNAKE_CASE(pil_image, w=512, h=512):
    """Resize a PIL image to (w, h) and convert it to a (1, 3, h, w) float
    tensor with values scaled to [-1, 1].

    Fixes two obfuscation defects: the resized image was bound to a dead
    variable (so `convert` ran on the un-resized input) and ``np.floataa``
    is a digit-mangled ``np.float32``.  The original's duplicate
    ``__lowerCAmelCase`` parameter names (a SyntaxError) are replaced by
    (pil_image, w, h) per the resize call's argument order.
    """
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    # uint8 [0, 255] -> float32 [-1, 1]
    arr = arr.astype(np.float32) / 127.5 - 1
    # HWC -> CHW, then add the batch dimension.
    arr = np.transpose(arr, [2, 0, 1])
    return torch.from_numpy(arr).unsqueeze(0)
class __a ( A__ ):
    """Kandinsky 2.2 image-to-image decoder pipeline (UNet + DDPM scheduler + MoVQ).

    NOTE(review): identifier-scrambled export.  Method parameters are all
    named ``SCREAMING_SNAKE_CASE`` (duplicate argument names are a
    SyntaxError in Python) and locals are repeatedly rebound to
    ``UpperCamelCase__``, so names the bodies read (``num_inference_steps``,
    ``image_embeds``, ``latents``, ``batch_size``, ...) are undefined.  Code
    is kept byte-identical; only documentation has been added.
    """
    def __init__( self : Dict , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : DDPMScheduler , SCREAMING_SNAKE_CASE : VQModel , ):
        '''Register the denoising UNet, the DDPM scheduler and the MoVQ autoencoder.'''
        super().__init__()
        self.register_modules(
            unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , movq=SCREAMING_SNAKE_CASE , )
        # Pixel-to-latent spatial scale factor derived from the MoVQ config.
        UpperCamelCase__ : Any = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ):
        '''Trim the scheduler timesteps for img2img: keep only the final `strength` fraction.'''
        UpperCamelCase__ : Tuple = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Dict = max(num_inference_steps - init_timestep , 0 )
        UpperCamelCase__ : Dict = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int=None ):
        '''Encode the input image with MoVQ and add scheduler noise (prepare_latents).'''
        if not isinstance(SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE )}' )
        UpperCamelCase__ : Dict = image.to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[Any] = batch_size * num_images_per_prompt
        # A 4-channel input is already a latent; otherwise encode via MoVQ.
        if image.shape[1] == 4:
            UpperCamelCase__ : str = image
        else:
            if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
                raise ValueError(
                    F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'
                    F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
            elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                UpperCamelCase__ : Dict = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE )
                ]
                UpperCamelCase__ : str = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
            else:
                UpperCamelCase__ : Optional[Any] = self.movq.encode(SCREAMING_SNAKE_CASE ).latent_dist.sample(SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : int = self.movq.config.scaling_factor * init_latents
        UpperCamelCase__ : Optional[Any] = torch.cat([init_latents] , dim=0 )
        UpperCamelCase__ : Optional[int] = init_latents.shape
        UpperCamelCase__ : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
        # get latents
        UpperCamelCase__ : str = self.scheduler.add_noise(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[int] = init_latents
        return latents
    def __lowercase ( self : str , SCREAMING_SNAKE_CASE : List[Any]=0 ):
        '''Sequentially offload unet and movq to CPU via accelerate's cpu_offload.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        UpperCamelCase__ : Optional[Any] = torch.device(F'cuda:{gpu_id}' )
        UpperCamelCase__ : Tuple = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    def __lowercase ( self : str , SCREAMING_SNAKE_CASE : Dict=0 ):
        '''Model-level CPU offload with hooks (requires accelerate >= 0.17.0.dev0).'''
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        UpperCamelCase__ : int = torch.device(F'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=SCREAMING_SNAKE_CASE )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        UpperCamelCase__ : Union[str, Any] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            UpperCamelCase__ : int = cpu_offload_with_hook(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prev_module_hook=SCREAMING_SNAKE_CASE )
        # We'll offload the last model manually.
        UpperCamelCase__ : Optional[Any] = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __lowercase ( self : Optional[int] ):
        '''Device on which the pipeline actually executes (accounts for offload hooks).'''
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(SCREAMING_SNAKE_CASE )
    def __call__( self : List[str] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE : int = 5_12 , SCREAMING_SNAKE_CASE : int = 5_12 , SCREAMING_SNAKE_CASE : int = 1_00 , SCREAMING_SNAKE_CASE : float = 4.0 , SCREAMING_SNAKE_CASE : float = 0.3 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , ):
        '''Run img2img denoising; returns decoded images (ImagePipelineOutput or tuple).'''
        UpperCamelCase__ : Any = self._execution_device
        UpperCamelCase__ : Union[str, Any] = guidance_scale > 1.0
        # Stack list inputs into batch tensors.
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            UpperCamelCase__ : Dict = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
        UpperCamelCase__ : Optional[int] = image_embeds.shape[0]
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            UpperCamelCase__ : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
        # For classifier-free guidance the negative and positive embeddings
        # are concatenated along the batch dimension.
        if do_classifier_free_guidance:
            UpperCamelCase__ : List[Any] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
            UpperCamelCase__ : Dict = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
            UpperCamelCase__ : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE )
        if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            UpperCamelCase__ : int = [image]
        if not all(isinstance(SCREAMING_SNAKE_CASE , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F'Input is in incorrect format: {[type(SCREAMING_SNAKE_CASE ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
        # Preprocess, encode to MoVQ latents, and noise them per `strength`.
        UpperCamelCase__ : Union[str, Any] = torch.cat([prepare_image(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in image] , dim=0 )
        UpperCamelCase__ : int = image.to(dtype=image_embeds.dtype , device=SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[Any] = self.movq.encode(SCREAMING_SNAKE_CASE )["latents"]
        UpperCamelCase__ : str = latents.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : int = self.get_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[int] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        UpperCamelCase__ : Dict = downscale_height_and_width(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.movq_scale_factor )
        UpperCamelCase__ : int = self.prepare_latents(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , image_embeds.dtype , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Denoising loop.
        for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCamelCase__ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCamelCase__ : List[Any] = {"image_embeds": image_embeds}
            UpperCamelCase__ : Tuple = self.unet(
                sample=SCREAMING_SNAKE_CASE , timestep=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , added_cond_kwargs=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , )[0]
            if do_classifier_free_guidance:
                UpperCamelCase__ : str = noise_pred.split(latents.shape[1] , dim=1 )
                UpperCamelCase__ : Optional[Any] = noise_pred.chunk(2 )
                UpperCamelCase__ : Any = variance_pred.chunk(2 )
                UpperCamelCase__ : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                UpperCamelCase__ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                UpperCamelCase__ : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase__ : int = self.scheduler.step(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , )[0]
        # post-processing
        UpperCamelCase__ : Dict = self.movq.decode(SCREAMING_SNAKE_CASE , force_not_quantize=SCREAMING_SNAKE_CASE )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            # [-1, 1] -> [0, 1], then NCHW -> NHWC numpy.
            UpperCamelCase__ : int = image * 0.5 + 0.5
            UpperCamelCase__ : Optional[Any] = image.clamp(0 , 1 )
            UpperCamelCase__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCamelCase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
        if not return_dict:
            return (image,)
        # NOTE(review): the trailing "| 369 |" is dataset-export residue fused onto this line.
        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE ) | 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and pretrained-config archive map.
# NOTE(review): both constants share the obfuscated name `lowerCamelCase`,
# so the second assignment rebinds the first.
lowerCamelCase : Optional[int] =logging.get_logger(__name__)
lowerCamelCase : Optional[Any] ={
    '''edbeeching/decision-transformer-gym-hopper-medium''': (
        '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __a ( A__ ):
    """Decision Transformer configuration skeleton (obfuscated export).

    NOTE(review): all three class attributes below share the single name
    ``_lowerCAmelCase`` so only the last binding survives (the originals were
    presumably ``model_type``, ``keys_to_ignore_at_inference`` and
    ``attribute_map`` -- confirm against the upstream source).  The
    ``__init__`` parameters are all named ``SCREAMING_SNAKE_CASE`` (duplicate
    argument names are a SyntaxError) and the body binds locals such as
    ``state_dim`` to the throwaway name ``UpperCamelCase__`` instead of
    setting ``self.<attr>``.  Code kept byte-identical; documentation only.
    """
    _lowerCAmelCase : str = '''decision_transformer'''
    _lowerCAmelCase : Optional[Any] = ['''past_key_values''']
    _lowerCAmelCase : Optional[int] = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]=17 , SCREAMING_SNAKE_CASE : Optional[int]=4 , SCREAMING_SNAKE_CASE : Optional[int]=1_28 , SCREAMING_SNAKE_CASE : Dict=40_96 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Optional[int]=10_24 , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict="relu" , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1e-5 , SCREAMING_SNAKE_CASE : Optional[int]=0.0_2 , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : int=5_02_56 , SCREAMING_SNAKE_CASE : List[Any]=5_02_56 , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : List[Any]=False , **SCREAMING_SNAKE_CASE : Optional[int] , ):
        '''GPT-2 style hyperparameters plus state/action dimensions for the RL setting.'''
        UpperCamelCase__ : List[str] = state_dim
        UpperCamelCase__ : Optional[int] = act_dim
        UpperCamelCase__ : Dict = hidden_size
        UpperCamelCase__ : Any = max_ep_len
        UpperCamelCase__ : Optional[Any] = action_tanh
        UpperCamelCase__ : str = vocab_size
        UpperCamelCase__ : List[str] = n_positions
        UpperCamelCase__ : Tuple = n_layer
        UpperCamelCase__ : Union[str, Any] = n_head
        UpperCamelCase__ : Dict = n_inner
        UpperCamelCase__ : int = activation_function
        UpperCamelCase__ : List[str] = resid_pdrop
        UpperCamelCase__ : Optional[int] = embd_pdrop
        UpperCamelCase__ : Optional[Any] = attn_pdrop
        UpperCamelCase__ : Optional[Any] = layer_norm_epsilon
        UpperCamelCase__ : int = initializer_range
        UpperCamelCase__ : Dict = scale_attn_weights
        UpperCamelCase__ : Tuple = use_cache
        UpperCamelCase__ : List[str] = scale_attn_by_inverse_layer_idx
        UpperCamelCase__ : Union[str, Any] = reorder_and_upcast_attn
        UpperCamelCase__ : Optional[Any] = bos_token_id
        UpperCamelCase__ : Optional[int] = eos_token_id
        # NOTE(review): the trailing "| 196 | 0 |" is dataset-export residue fused onto this line.
        super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) | 196 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _lowerCAmelCase ( a ):
    """Output wrapper for the VAE's `encode` call: holds the latent posterior.

    NOTE(review): the base class ``a`` is undefined in this view (presumably
    ``BaseOutput``) and the field name was scrambled to ``__magic_name__``
    (presumably ``latent_dist``) -- confirm against the upstream source.
    """
    __magic_name__ :"DiagonalGaussianDistribution"
class _lowerCAmelCase ( a , a ):
    """Variational autoencoder with KL prior (AutoencoderKL-style), supporting
    sliced and tiled encode/decode for large inputs.

    NOTE(review): identifier-scrambled export.  Both base classes are the
    single undefined name ``a`` (listed twice, itself an error), parameters
    are uniformly named ``__UpperCAmelCase`` (duplicate argument names are a
    SyntaxError) and locals are rebound to ``lowerCAmelCase__``, so names the
    bodies read (``x``, ``z``, ``value``, ``posterior``, ``dec``, ...) are
    undefined.  Code kept byte-identical; documentation only.
    """
    __magic_name__ :Union[str, Any] = True
    @register_to_config
    def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (6_4,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 0.1_82_15 , ):
        '''Build the encoder, decoder and the 1x1 (de)quantization convolutions.'''
        super().__init__()
        # pass init params to Encoder
        lowerCAmelCase__ :Optional[Any] = Encoder(
            in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , )
        # pass init params to Decoder
        lowerCAmelCase__ :Any = Decoder(
            in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , )
        lowerCAmelCase__ :Dict = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
        lowerCAmelCase__ :List[str] = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
        lowerCAmelCase__ :List[str] = False
        lowerCAmelCase__ :Any = False
        # only relevant if vae tiling is enabled
        lowerCAmelCase__ :Tuple = self.config.sample_size
        lowerCAmelCase__ :Optional[Any] = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        lowerCAmelCase__ :Tuple = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        lowerCAmelCase__ :List[Any] = 0.25
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
        '''Gradient-checkpointing toggle hook for Encoder/Decoder submodules.'''
        if isinstance(__UpperCAmelCase , (Encoder, Decoder) ):
            lowerCAmelCase__ :Tuple = value
    def snake_case ( self , __UpperCAmelCase = True ):
        '''Enable (or, with False, disable) tiled encode/decode for large images.'''
        lowerCAmelCase__ :Optional[Any] = use_tiling
    def snake_case ( self ):
        '''Disable tiling (NOTE(review): the argument read here is undefined; upstream passes False).'''
        self.enable_tiling(__UpperCAmelCase )
    def snake_case ( self ):
        '''Enable sliced (per-sample) VAE encode/decode to save memory.'''
        lowerCAmelCase__ :Optional[int] = True
    def snake_case ( self ):
        '''Disable sliced VAE encode/decode.'''
        lowerCAmelCase__ :str = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def snake_case ( self ):
        '''Return all attention processors indexed by their module path.'''
        lowerCAmelCase__ :Optional[Any] = {}
        def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
            if hasattr(__UpperCAmelCase , 'set_processor' ):
                lowerCAmelCase__ :str = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"{name}.{sub_name}" , __UpperCAmelCase , __UpperCAmelCase )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        return processors
    def snake_case ( self , __UpperCAmelCase ):
        '''Set attention processor(s); accepts a single processor or a path-keyed dict.'''
        lowerCAmelCase__ :Dict = len(self.attn_processors.keys() )
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count:
            raise ValueError(
                F"A dict of processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the"
                F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
        def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
            if hasattr(__UpperCAmelCase , 'set_processor' ):
                if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                    module.set_processor(__UpperCAmelCase )
                else:
                    module.set_processor(processor.pop(F"{name}.processor" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"{name}.{sub_name}" , __UpperCAmelCase , __UpperCAmelCase )
        for name, module in self.named_children():
            fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
    def snake_case ( self ):
        '''Reset every attention module to the default AttnProcessor.'''
        self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''Encode a batch of images into a DiagonalGaussianDistribution posterior.'''
        # Large inputs fall back to tiled encoding; slicing encodes per sample.
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
        if self.use_slicing and x.shape[0] > 1:
            lowerCAmelCase__ :Tuple = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )]
            lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase )
        else:
            lowerCAmelCase__ :Optional[Any] = self.encoder(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = self.quant_conv(__UpperCAmelCase )
        lowerCAmelCase__ :List[str] = DiagonalGaussianDistribution(__UpperCAmelCase )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''Decode latents without batch slicing (internal helper).'''
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = self.post_quant_conv(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = self.decoder(__UpperCAmelCase )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__UpperCAmelCase )
    @apply_forward_hook
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''Public decode entry point; splits the batch when slicing is enabled.'''
        if self.use_slicing and z.shape[0] > 1:
            lowerCAmelCase__ :str = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )]
            lowerCAmelCase__ :str = torch.cat(__UpperCAmelCase )
        else:
            lowerCAmelCase__ :int = self._decode(__UpperCAmelCase ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=__UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        '''Linearly blend tiles a (above) and b (below) over `blend_extent` rows.'''
        lowerCAmelCase__ :Union[str, Any] = min(a.shape[2] , b.shape[2] , __UpperCAmelCase )
        for y in range(__UpperCAmelCase ):
            lowerCAmelCase__ :Optional[Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        '''Linearly blend tiles a (left) and b (right) over `blend_extent` columns.'''
        lowerCAmelCase__ :Tuple = min(a.shape[3] , b.shape[3] , __UpperCAmelCase )
        for x in range(__UpperCAmelCase ):
            lowerCAmelCase__ :Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''Tiled encode: process overlapping tiles and blend the seams.'''
        lowerCAmelCase__ :List[Any] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        lowerCAmelCase__ :Optional[Any] = int(self.tile_latent_min_size * self.tile_overlap_factor )
        lowerCAmelCase__ :Any = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        lowerCAmelCase__ :Optional[int] = []
        for i in range(0 , x.shape[2] , __UpperCAmelCase ):
            lowerCAmelCase__ :Dict = []
            for j in range(0 , x.shape[3] , __UpperCAmelCase ):
                lowerCAmelCase__ :Union[str, Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                lowerCAmelCase__ :int = self.encoder(__UpperCAmelCase )
                lowerCAmelCase__ :List[str] = self.quant_conv(__UpperCAmelCase )
                row.append(__UpperCAmelCase )
            rows.append(__UpperCAmelCase )
        lowerCAmelCase__ :int = []
        for i, row in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ :Tuple = []
            for j, tile in enumerate(__UpperCAmelCase ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    lowerCAmelCase__ :Tuple = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
                if j > 0:
                    lowerCAmelCase__ :Union[str, Any] = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
        lowerCAmelCase__ :List[Any] = torch.cat(__UpperCAmelCase , dim=2 )
        lowerCAmelCase__ :int = DiagonalGaussianDistribution(__UpperCAmelCase )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''Tiled decode: decode overlapping latent tiles and blend the seams.'''
        lowerCAmelCase__ :Dict = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        lowerCAmelCase__ :List[str] = int(self.tile_sample_min_size * self.tile_overlap_factor )
        lowerCAmelCase__ :List[str] = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        lowerCAmelCase__ :Any = []
        for i in range(0 , z.shape[2] , __UpperCAmelCase ):
            lowerCAmelCase__ :int = []
            for j in range(0 , z.shape[3] , __UpperCAmelCase ):
                lowerCAmelCase__ :Any = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                lowerCAmelCase__ :Dict = self.post_quant_conv(__UpperCAmelCase )
                lowerCAmelCase__ :Dict = self.decoder(__UpperCAmelCase )
                row.append(__UpperCAmelCase )
            rows.append(__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = []
        for i, row in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ :Optional[Any] = []
            for j, tile in enumerate(__UpperCAmelCase ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    lowerCAmelCase__ :str = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
                if j > 0:
                    lowerCAmelCase__ :Union[str, Any] = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
        lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ):
        '''Full forward pass: encode, sample (or take the mode of) the posterior, decode.'''
        lowerCAmelCase__ :Tuple = sample
        lowerCAmelCase__ :List[str] = self.encode(__UpperCAmelCase ).latent_dist
        if sample_posterior:
            lowerCAmelCase__ :Tuple = posterior.sample(generator=__UpperCAmelCase )
        else:
            lowerCAmelCase__ :List[Any] = posterior.mode()
        lowerCAmelCase__ :List[Any] = self.decode(__UpperCAmelCase ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__UpperCAmelCase )
# --- dataset-export artifact (row separator, originally "| 293 |") ---
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(a )
class _lowerCAmelCase ( a ):
    """Automatic mask-generation pipeline (SAM-style ChunkPipeline).

    Prompts the model with a grid of points laid over the image (and over
    optional crops of it), predicts masks for the points in batches, then
    filters and de-duplicates the masks in postprocessing.

    NOTE(review): identifiers in this file were machine-mangled — several
    ``def`` lines below reuse a single name for every parameter (not valid
    Python), and bodies reference the *intended* names (``kwargs``,
    ``grid_points``, ...). The original signatures must be restored before
    this class can run.
    """

    def __init__( self , **__UpperCAmelCase ):
        """Validate required backends; this pipeline is PyTorch-only."""
        super().__init__(**__UpperCAmelCase )
        requires_backends(self , 'vision' )
        requires_backends(self , 'torch' )

        if self.framework != "pt":
            raise ValueError(F"The {self.__class__} is only available in PyTorch." )

        # NOTE(review): upstream passes MODEL_FOR_MASK_GENERATION_MAPPING here;
        # the mangled argument forwards the kwargs dict instead — verify.
        self.check_model_type(__UpperCAmelCase )

    def snake_case ( self , **__UpperCAmelCase ):
        """Split ``**kwargs`` into (preprocess, forward, postprocess) dicts,
        as required by the ChunkPipeline protocol."""
        lowerCAmelCase__ :List[str] = {}
        lowerCAmelCase__ :Tuple = {}
        lowerCAmelCase__ :Any = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            lowerCAmelCase__ :Dict = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            lowerCAmelCase__ :Union[str, Any] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            lowerCAmelCase__ :Any = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            lowerCAmelCase__ :Any = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            lowerCAmelCase__ :Dict = kwargs['crop_n_points_downscale_factor']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            lowerCAmelCase__ :Tuple = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            lowerCAmelCase__ :Optional[int] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            lowerCAmelCase__ :List[Any] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            lowerCAmelCase__ :Optional[Any] = kwargs['stability_score_thresh']
        if "crops_nms_thresh" in kwargs:
            lowerCAmelCase__ :int = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            lowerCAmelCase__ :Union[str, Any] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            lowerCAmelCase__ :Optional[Any] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
        """Generate masks for the given image(s); delegates to ChunkPipeline."""
        return super().__call__(__UpperCAmelCase , *__UpperCAmelCase , num_workers=__UpperCAmelCase , batch_size=__UpperCAmelCase , **__UpperCAmelCase )

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=6_4 , __UpperCAmelCase = 0 , __UpperCAmelCase = 5_1_2 / 1_5_0_0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 1 , ):
        """Preprocess step: load the image, build crop boxes and point grids,
        embed the image once, then yield point prompts in batches."""
        lowerCAmelCase__ :Union[str, Any] = load_image(__UpperCAmelCase )
        lowerCAmelCase__ :int = self.image_processor.size['longest_edge']
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = self.image_processor.generate_crop_boxes(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = self.image_processor(images=__UpperCAmelCase , return_tensors='pt' )

        with self.device_placement():
            if self.framework == "pt":
                lowerCAmelCase__ :Optional[int] = self.get_inference_context()
                with inference_context():
                    # Compute the image embedding once; every point batch below
                    # reuses it instead of re-encoding the image.
                    lowerCAmelCase__ :Any = self._ensure_tensor_on_device(__UpperCAmelCase , device=self.device )
                    lowerCAmelCase__ :Tuple = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
                    lowerCAmelCase__ :Optional[int] = image_embeddings

        lowerCAmelCase__ :List[Any] = grid_points.shape[1]
        lowerCAmelCase__ :Union[str, Any] = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
                'To return all points at once, set points_per_batch to None' )

        # Yield one chunk per slice of the point grid; the final chunk is
        # flagged with is_last so postprocess knows when to aggregate.
        for i in range(0 , __UpperCAmelCase , __UpperCAmelCase ):
            lowerCAmelCase__ :Optional[Any] = grid_points[:, i : i + points_per_batch, :, :]
            lowerCAmelCase__ :List[str] = input_labels[:, i : i + points_per_batch]
            lowerCAmelCase__ :List[Any] = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0.88 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , ):
        """Forward step: run the model on one point batch and filter its masks
        while they are still on the accelerator device."""
        lowerCAmelCase__ :Any = model_inputs.pop('input_boxes' )
        lowerCAmelCase__ :Optional[int] = model_inputs.pop('is_last' )
        lowerCAmelCase__ :Dict = model_inputs.pop('original_sizes' ).tolist()
        lowerCAmelCase__ :Dict = model_inputs.pop('reshaped_input_sizes' ).tolist()

        lowerCAmelCase__ :Optional[int] = self.model(**__UpperCAmelCase )

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        lowerCAmelCase__ :int = model_outputs['pred_masks']
        lowerCAmelCase__ :Optional[Any] = self.image_processor.post_process_masks(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , binarize=__UpperCAmelCase )
        lowerCAmelCase__ :Any = model_outputs['iou_scores']
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.7 , ):
        """Postprocess step: concatenate per-batch outputs and run cross-crop
        NMS/de-duplication; optionally attach RLE masks and bounding boxes."""
        lowerCAmelCase__ :Dict = []
        lowerCAmelCase__ :Optional[Any] = []
        lowerCAmelCase__ :int = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores' ) )
            all_masks.extend(model_output.pop('masks' ) )
            all_boxes.append(model_output.pop('boxes' ) )

        lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase )
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self.image_processor.post_process_for_mask_generation(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

        # Collect any remaining per-batch keys so callers can inspect extras.
        lowerCAmelCase__ :Tuple = defaultdict(__UpperCAmelCase )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(__UpperCAmelCase )

        lowerCAmelCase__ :Optional[int] = {}
        if output_rle_mask:
            lowerCAmelCase__ :str = rle_mask
        if output_bboxes_mask:
            lowerCAmelCase__ :Optional[int] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 293 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _UpperCAmelCase :
    """Helper that builds tiny TFDebertaVa configs/inputs and runs shape checks.

    NOTE(review): identifiers were machine-mangled — every method is named
    ``lowercase`` (later defs shadow earlier ones) and each ``def`` reuses
    ``lowerCAmelCase_`` for all parameters, which is not valid Python. Bodies
    reference the intended names (``parent``, ``batch_size``, ...); the
    original signatures must be restored before this can run.
    """

    def __init__( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=1_3 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Tuple=5_1_2 , lowerCAmelCase_ : Union[str, Any]=1_6 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=0.02 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : str="None" , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : str=None , ) -> Optional[Any]:
        """Store the test-harness handle and all model hyper-parameters."""
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_input_mask
        __lowerCAmelCase = use_token_type_ids
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = num_choices
        __lowerCAmelCase = relative_attention
        __lowerCAmelCase = position_biased_input
        __lowerCAmelCase = pos_att_type
        __lowerCAmelCase = scope

    def lowercase ( self : Union[str, Any] ) -> Any:
        """Build random input tensors plus a matching small DebertaVaConfig."""
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __lowerCAmelCase = None
        if self.use_input_mask:
            __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )

        __lowerCAmelCase = None
        if self.use_token_type_ids:
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        __lowerCAmelCase = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase_ , )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowercase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
        """Check the base model's output shape for dict and list-style inputs."""
        __lowerCAmelCase = TFDebertaVaModel(config=lowerCAmelCase_ )
        __lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        __lowerCAmelCase = [input_ids, input_mask]
        __lowerCAmelCase = model(lowerCAmelCase_ )

        __lowerCAmelCase = model(lowerCAmelCase_ )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowercase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any ) -> List[str]:
        """Check the masked-LM head produces vocab-sized logits per token."""
        __lowerCAmelCase = TFDebertaVaForMaskedLM(config=lowerCAmelCase_ )
        __lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        __lowerCAmelCase = model(lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowercase ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> Optional[int]:
        """Check the sequence-classification head emits (batch, num_labels)."""
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = TFDebertaVaForSequenceClassification(config=lowerCAmelCase_ )
        __lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }

        __lowerCAmelCase = model(lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowercase ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any ) -> str:
        """Check the token-classification head emits per-token label logits."""
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = TFDebertaVaForTokenClassification(config=lowerCAmelCase_ )
        __lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        __lowerCAmelCase = model(lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowercase ( self : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any ) -> int:
        """Check the QA head emits per-token start and end logits."""
        __lowerCAmelCase = TFDebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
        __lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }

        __lowerCAmelCase = model(lowerCAmelCase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowercase ( self : Union[str, Any] ) -> Any:
        """Repack prepare_config_and_inputs() output into (config, inputs_dict)."""
        # NOTE(review): ``self.prepare_config_and_inputs`` does not exist under
        # the mangled method names — restore the original names to fix.
        __lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = config_and_inputs

        __lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Common model/pipeline test-suite for the TF DeBERTa-v2 family.

    NOTE(review): method names were mangled to ``lowercase`` (later defs
    shadow earlier ones), so unittest would not discover these as tests;
    the original ``test_*`` names must be restored.
    """

    # All TF DeBERTa-v2 heads exercised by the common model tests.
    a_ = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task -> model class mapping for the pipeline tester mixin.
    a_ = (
        {
            """feature-extraction""": TFDebertaVaModel,
            """fill-mask""": TFDebertaVaForMaskedLM,
            """question-answering""": TFDebertaVaForQuestionAnswering,
            """text-classification""": TFDebertaVaForSequenceClassification,
            """token-classification""": TFDebertaVaForTokenClassification,
            """zero-shot""": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a_ = False
    a_ = False

    def lowercase ( self : Optional[int] ) -> Tuple:
        """Instantiate the model tester and a ConfigTester for DebertaVaConfig."""
        # NOTE(review): ``TFDebertaVaModelTester`` is not defined in this file
        # (the tester class above was renamed by the mangler) — restore the
        # class name or this raises NameError.
        __lowerCAmelCase = TFDebertaVaModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7 )

    def lowercase ( self : Optional[int] ) -> int:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowercase ( self : Optional[int] ) -> Optional[int]:
        """Exercise the base model shape check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase_ )

    def lowercase ( self : int ) -> List[str]:
        """Exercise the masked-LM head check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )

    def lowercase ( self : Optional[Any] ) -> Tuple:
        """Exercise the question-answering head check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )

    def lowercase ( self : Optional[int] ) -> Union[str, Any]:
        """Exercise the sequence-classification head check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )

    def lowercase ( self : int ) -> int:
        """Exercise the token-classification head check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )

    @slow
    def lowercase ( self : int ) -> Optional[int]:
        """Smoke-test loading the pretrained xlarge checkpoint from the Hub."""
        __lowerCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
        self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests against the pretrained DeBERTa-v2 xlarge checkpoint."""

    @unittest.skip(reason='Model not available yet' )
    def lowercase ( self : Dict ) -> int:
        """Placeholder kept until the reference model is published."""
        pass

    @slow
    def lowercase ( self : int ) -> int:
        """Compare one forward pass against hard-coded reference activations."""
        __lowerCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
        __lowerCAmelCase = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        __lowerCAmelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        __lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]

        # Reference values recorded from a known-good run; compared on a
        # small 3x3 slice with loose float tolerance.
        __lowerCAmelCase = tf.constant(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 )
| 207 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Nightly GPU integration tests for the legacy ONNX Stable Diffusion
    inpainting pipeline (downloads checkpoints and reference images)."""

    @property
    def lowercase ( self : Union[str, Any] ) -> Tuple:
        """CUDA execution-provider spec with a capped 15 GB memory arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def lowercase ( self : List[Any] ) -> int:
        """ONNX Runtime session options for the tests."""
        __lowerCAmelCase = ort.SessionOptions()
        # NOTE(review): the mangled assignment above drops the attribute name;
        # upstream sets ``options.enable_mem_pattern = False`` — confirm.
        __lowerCAmelCase = False
        return options

    def lowercase ( self : Tuple ) -> List[Any]:
        """End-to-end inpaint run compared against a stored reference image."""
        __lowerCAmelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        __lowerCAmelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        __lowerCAmelCase = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )

        # using the PNDM scheduler by default
        __lowerCAmelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )

        __lowerCAmelCase = 'A red cat sitting on a park bench'

        # Fixed seed so the diffusion output is reproducible.
        __lowerCAmelCase = np.random.RandomState(0 )
        __lowerCAmelCase = pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=lowerCAmelCase_ , output_type='np' , )

        __lowerCAmelCase = output.images[0]

        assert image.shape == (5_1_2, 5_1_2, 3)
        # Pixel-wise comparison with loose tolerance against the reference.
        assert np.abs(expected_image - image ).max() < 1e-2
| 207 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
    """Speech feature extractor producing Kaldi-style log-mel filter-bank
    features, optionally with utterance-level cepstral mean/variance
    normalization (CMVN).

    NOTE(review): identifiers were machine-mangled — ``def`` lines reuse one
    name for all parameters (not valid Python) and bodies reference the
    intended names (``waveform``, ``raw_speech``, ...). Restore the original
    signatures before use.
    """

    # Keys emitted by __call__ for each example.
    snake_case__ : str = ['''input_features''', '''attention_mask''']

    def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Tuple=8_0 , SCREAMING_SNAKE_CASE__ : Dict=1_6_0_0_0 , SCREAMING_SNAKE_CASE__ : List[Any]=8_0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : int=True , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> List[Any]:
        """Store mel-bin count and CMVN switches; attention mask is always on."""
        super().__init__(feature_size=SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , padding_value=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        a_ : List[Any] = num_mel_bins
        a_ : Any = do_ceptral_normalize
        a_ : Optional[Any] = normalize_means
        a_ : Dict = normalize_vars
        a_ : Union[str, Any] = True

    def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : np.ndarray , ) -> np.ndarray:
        """Compute Kaldi fbank features for one waveform (numpy in/out)."""
        a_ : Optional[int] = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
        a_ : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
        a_ : Optional[Any] = ta_kaldi.fbank(SCREAMING_SNAKE_CASE__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()

    @staticmethod
    def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[bool] = True , SCREAMING_SNAKE_CASE__ : Optional[bool] = True , SCREAMING_SNAKE_CASE__ : float = 0.0 , ) -> np.ndarray:
        """Apply utterance-level CMVN to the first ``input_length`` frames and
        re-pad the tail with ``padding_value``."""
        # make sure we normalize float32 arrays
        if normalize_means:
            a_ : Union[str, Any] = x[:input_length].mean(axis=0 )
            a_ : List[Any] = np.subtract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        if normalize_vars:
            a_ : Tuple = x[:input_length].std(axis=0 )
            a_ : str = np.divide(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

        if input_length < x.shape[0]:
            # Padded frames were shifted by the normalization — restore them.
            a_ : Tuple = padding_value

        # make sure array is in float32
        a_ : str = x.astype(np.floataa )

        return x

    def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[np.ndarray] , SCREAMING_SNAKE_CASE__ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
        """CMVN-normalize each feature matrix, using the attention mask (when
        given) to find each utterance's true length."""
        a_ : Union[str, Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        ]

    def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , **SCREAMING_SNAKE_CASE__ : Any , ) -> BatchFeature:
        """Featurize raw speech: extract fbank features, pad to a batch, then
        optionally apply CMVN and convert to the requested tensor type."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        a_ : Any = isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        a_ : Any = is_batched_numpy or (
            isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            a_ : Optional[int] = [np.asarray(SCREAMING_SNAKE_CASE__ , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
            a_ : Tuple = np.asarray(SCREAMING_SNAKE_CASE__ , dtype=np.floataa )
        elif isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a_ : Optional[int] = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            a_ : List[str] = [raw_speech]

        # extract fbank features
        a_ : Any = [self._extract_fbank_features(SCREAMING_SNAKE_CASE__ ) for waveform in raw_speech]

        # convert into correct format for padding
        a_ : List[Any] = BatchFeature({'input_features': features} )

        a_ : List[Any] = self.pad(
            SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )

        # make sure list is in array format
        a_ : Any = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , SCREAMING_SNAKE_CASE__ ):
            a_ : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE__ , dtype=np.floataa ) for feature in input_features]

        a_ : str = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            a_ : List[Any] = [np.asarray(SCREAMING_SNAKE_CASE__ , dtype=np.intaa ) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            a_ : List[str] = (
                np.array(SCREAMING_SNAKE_CASE__ , dtype=np.intaa )
                if self._get_padding_strategies(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            a_ : List[str] = self.normalize(
                padded_inputs['input_features'] , attention_mask=SCREAMING_SNAKE_CASE__ )

        if return_tensors is not None:
            a_ : List[str] = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE__ )

        return padded_inputs
| 32 |
"""simple docstring"""
class A_ :
    """Fenwick (binary indexed) tree specialised for range-maximum queries.

    ``arr`` keeps the raw values; ``tree[i]`` caches the maximum of
    ``arr[get_prev(i) + 1 .. i]``. ``update`` runs in O(log^2 n) — it
    re-queries a node's range so that *decreasing* a value also works —
    and ``query`` in O(log n). All values must be >= 0 (query starts at 0).

    >>> t = A_(5)
    >>> t.query(0, 5)
    0
    >>> t.update(4, 100)
    >>> t.query(0, 5)
    100
    >>> t.update(4, 0)
    >>> t.update(2, 20)
    >>> t.query(0, 5)
    20
    """

    def __init__(self, size: int) -> None:
        """Create a tree over ``size`` slots, all initialised to 0."""
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        """Next node on the update path (sets the lowest cleared bit)."""
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        """Last index *before* the range covered by ``tree[index]``."""
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set ``arr[index] = value`` and refresh every affected tree node."""
        self.arr[index] = value
        while index < self.size:
            left = self.get_prev(index) + 1  # leftmost slot covered by tree[index]
            if left == index:
                # Node covers a single slot: store the new value directly.
                self.tree[index] = value
            else:
                # Re-derive the node maximum from already-refreshed sub-nodes
                # so that decreases are handled correctly too.
                self.tree[index] = max(value, self.query(left, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return ``max(arr[left:right])``; ``right`` is exclusive."""
        right -= 1  # work with an inclusive right border
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # tree[right] covers [current_left + 1, right] entirely.
                result = max(result, self.tree[right])
                right = current_left
            else:
                # Partial overlap: fall back to the raw value at ``right``.
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

# Module-level logger for this example script.
__lowerCamelCase : List[str] = logging.getLogger(__name__)
@dataclass
class A__ :
    """Arguments for selecting which model/config/tokenizer to fine-tune.

    NOTE(review): the mangler renamed every field to ``_UpperCAmelCase``, so
    later fields shadow earlier ones and ``__snake_case`` defaults are
    unresolved — the original field names (``model_name_or_path``, ...) must
    be restored. Per-field help text lives in the ``field`` metadata.
    """

    _UpperCAmelCase :List[str] = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    _UpperCAmelCase :Dict = field(
        default=__snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    _UpperCAmelCase :List[Any] = field(
        default=__snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    _UpperCAmelCase :Any = field(
        default=__snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    _UpperCAmelCase :List[str] = field(
        default=__snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    _UpperCAmelCase :Optional[Any] = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    _UpperCAmelCase :Optional[Any] = field(
        default=__snake_case , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
@dataclass
class A__ :
    """Arguments describing the input data and preprocessing for training/eval.

    NOTE(review): field names were mangled (all ``_UpperCAmelCase``) and
    ``__snake_case`` defaults are unresolved — restore the originals
    (``train_file``, ``validation_file``, ``max_seq_length``, ...).
    Per-field help text lives in the ``field`` metadata.
    """

    _UpperCAmelCase :Union[str, Any] = field(default=__snake_case , metadata={'help': 'The input training data file (a text file).'} )
    _UpperCAmelCase :Tuple = field(
        default=__snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    _UpperCAmelCase :int = field(
        default=__snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    _UpperCAmelCase :List[str] = field(
        default=__snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    _UpperCAmelCase :Optional[Any] = field(
        default=__snake_case , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    _UpperCAmelCase :str = field(
        default=__snake_case , metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        } , )
    _UpperCAmelCase :Any = field(
        default=__snake_case , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    _UpperCAmelCase :List[Any] = field(
        default=__snake_case , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __UpperCamelCase( self ):
        """Validate that train/validation files are csv or json.

        NOTE(review): upstream this is ``__post_init__``; under the mangled
        name it never runs automatically. ``assert`` is also stripped under
        ``python -O`` — raising ValueError would be more robust.
        """
        if self.train_file is not None:
            UpperCamelCase : str = self.train_file.split("." )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            UpperCamelCase : List[Any] = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A__ :
    """Data collator for multiple choice: flattens the per-example choices,
    pads them together, then un-flattens back to (batch, num_choices, ...).

    NOTE(review): field names were mangled (all ``_UpperCAmelCase``, so only
    the last survives) — upstream these are ``tokenizer``, ``padding``,
    ``max_length``, ``pad_to_multiple_of``; restore before use.
    """

    _UpperCAmelCase :Optional[Any] = 4_2
    _UpperCAmelCase :int = True
    _UpperCAmelCase :List[str] = None
    _UpperCAmelCase :List[Any] = None

    def __call__( self , A_ ):
        """Collate a list of multiple-choice features into one padded batch."""
        # Accept either "label" or "labels" as the label key.
        UpperCamelCase : int = "label" if "label" in features[0].keys() else "labels"
        UpperCamelCase : Optional[int] = [feature.pop(__A ) for feature in features]
        UpperCamelCase : Union[str, Any] = len(__A )
        UpperCamelCase : str = len(features[0]["input_ids"] )
        # Flatten (batch, num_choices, seq) -> (batch * num_choices, seq) so
        # the tokenizer's pad() can process all choices at once.
        UpperCamelCase : Optional[Any] = [
            [{k: v[i] for k, v in feature.items()} for i in range(__A )] for feature in features
        ]
        UpperCamelCase : Union[str, Any] = list(chain(*__A ) )

        UpperCamelCase : Optional[int] = self.tokenizer.pad(
            __A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )

        # Un-flatten
        UpperCamelCase : str = {k: v.view(__A , __A , -1 ) for k, v in batch.items()}
        # Add back labels
        UpperCamelCase : Union[str, Any] = torch.tensor(__A , dtype=torch.intaa )
        return batch
def main():
    """Fine-tune / evaluate a multiple-choice model on SWAG or user CSV/JSON files.

    Fixes over the original:
    - renamed from ``A_``: a later ``def A_`` silently shadowed this function
      while the module-level calls referenced ``main()`` (NameError).
    - ``training_args.fpaa`` / ``np.floataa`` are not real attributes; they are
      restored to ``fp16`` / ``np.float32``.
    """
    # Parse CLI args (or a single .json file) into the three argument dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer. The .from_pretrained methods guarantee
    # that only one local process can concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat each context once per ending so every choice becomes one pair.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten back to groups of 4 choices per example.
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Call ``main()``; the single positional argument is accepted and ignored.

    NOTE(review): presumably the ``_mp_fn(index)`` hook used by TPU launchers
    (``xla_spawn.py``) — confirm against the launcher before renaming.
    """
    main()
# Script entry point: run the full train/eval pipeline when executed directly.
if __name__ == "__main__":
    main()
| 359 |
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
#
# Fix: the list below was bound to `__lowerCamelCase` while every use referred
# to `pkgs_to_check_at_runtime`, raising NameError at import time.
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()

if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def A_ ( pkg , hint=None ) -> None:
    """Check that the installed version of ``pkg`` satisfies ``deps[pkg]``.

    Fix: both parameters shared the name ``_lowerCAmelCase`` — a SyntaxError.
    ``hint`` is forwarded to ``require_version`` as the error hint.
    """
    require_version(deps[pkg], hint)
| 140 | 0 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def a__ ( snake_case__ ) -> int:
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for ``KandinskyVaaPriorPipeline``.

    Fixes over the original:
    - the base class was the undefined name ``__lowercase``; the imported
      ``PipelineTesterMixin`` is used instead.
    - every attribute/method shared a placeholder name, so later definitions
      overwrote earlier ones and the ``self.*`` lookups in the bodies (e.g.
      ``self.text_embedder_hidden_size``, ``self.dummy_prior``,
      ``self.get_dummy_components``) did not resolve; the names the bodies
      actually reference are restored.
    - stray extraction junk fused to the final line (a syntax error) removed.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase(ProcessorMixin):
    """Processor bundling a BLIP image processor with an LLM tokenizer and a Q-Former tokenizer.

    Fixes over the original:
    - the base class was the undefined name ``lowercase_``; ``ProcessorMixin``
      (imported above) is used instead.
    - the ProcessorMixin class attributes and the method names (``batch_decode``,
      ``decode``, ``model_input_names``, ``save_pretrained``, ``from_pretrained``)
      had been mangled into duplicate placeholder names that overwrote each other.
    - ``__init__`` dropped the Q-Former tokenizer and ``__call__`` dropped the
      Q-Former encodings into throwaway locals; they are now stored on the
      instance and merged into the returned ``BatchFeature`` under
      ``qformer_input_ids`` / ``qformer_attention_mask``.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize *text* with both tokenizers and preprocess *images*; at least one is required."""
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # Store the Q-Former stream under distinct keys so it does not clash
            # with the main tokenizer's output.
            encoding["qformer_input_ids"] = qformer_text_encoding.pop('input_ids')
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop('attention_mask')

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the main tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the main tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save the Q-Former tokenizer into a subfolder, then defer to the mixin."""
        if os.path.isfile(save_directory):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the Q-Former tokenizer from its subfolder and append it to the mixin's args."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 321 | """simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    """Convert a TF2 "token dropping" BERT checkpoint into a PyTorch ``BertForMaskedLM``.

    Fixes over the original:
    - all three parameters shared one placeholder name (a SyntaxError); they are
      restored to distinct names.
    - every converted weight was bound to a throwaway local instead of being
      written into the PyTorch module; the assignments now target the model
      parameters matching the TF variable names being loaded.

    NOTE(review): the assignment targets were reconstructed from the TF variable
    names — verify against the upstream conversion script before shipping.
    """

    def get_masked_lm_array(name: str):
        # Load a variable from the masked-LM head; kernels are stored transposed.
        full_name = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        # Attention weights are stored per-head; reshape to the PyTorch layout first.
        full_name = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f'''Loading model based on config from {config_path}...''')
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, '_query_dense/kernel', self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, '_query_dense/bias', self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, '_key_dense/kernel', self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, '_key_dense/bias', self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, '_value_dense/kernel', self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, '_value_dense/bias', self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, '_output_dense/kernel', self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, '_output_dense/bias', self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, '_attention_layer_norm/gamma')
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, '_attention_layer_norm/beta')

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, '_intermediate_dense/kernel')
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, '_intermediate_dense/bias')

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, '_output_dense/kernel')
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, '_output_dense/bias')
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, '_output_layer_norm/gamma')
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, '_output_layer_norm/beta')

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array('_position_embedding_layer/embeddings')
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array('_type_embedding_layer/embeddings')
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array('_embedding_norm_layer/gamma')
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array('_embedding_norm_layer/beta')

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array('dense/kernel')
    lm_head.dense.bias.data = get_masked_lm_array('dense/bias')
    lm_head.LayerNorm.weight.data = get_masked_lm_array('layer_norm/gamma')
    lm_head.LayerNorm.bias.data = get_masked_lm_array('layer_norm/beta')
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array('embedding_table')

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array('_pooler_layer/kernel')
    model.bert.pooler.dense.bias.data = get_encoder_array('_pooler_layer/bias')

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print('Model conversion was done sucessfully!')
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 321 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
# Fix: all four module constants were bound to the single name `a__`, each
# overwriting the previous, while the tokenizer class below references
# `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = VOCAB_FILES_NAMES
snake_case_ : Any = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : List[str] = ["""input_ids""", """attention_mask"""]
snake_case_ : int = MvpTokenizer
def __init__( self : Optional[int] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Any=None , lowerCAmelCase : Dict="replace" , lowerCAmelCase : Dict="<s>" , lowerCAmelCase : Dict="</s>" , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : int="<s>" , lowerCAmelCase : Dict="<unk>" , lowerCAmelCase : Optional[int]="<pad>" , lowerCAmelCase : Any="<mask>" , lowerCAmelCase : int=False , lowerCAmelCase : Dict=True , **lowerCAmelCase : List[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
_snake_case : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase) != add_prefix_space:
_snake_case : List[Any] = getattr(lowerCAmelCase , pre_tok_state.pop("""type"""))
_snake_case : Dict = add_prefix_space
_snake_case : int = pre_tok_class(**lowerCAmelCase)
_snake_case : Tuple = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_snake_case : Tuple = """post_processor"""
_snake_case : Any = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
_snake_case : List[Any] = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_snake_case : Any = tuple(state["""sep"""])
if "cls" in state:
_snake_case : int = tuple(state["""cls"""])
_snake_case : Union[str, Any] = False
if state.get("""add_prefix_space""" , lowerCAmelCase) != add_prefix_space:
_snake_case : Optional[int] = add_prefix_space
_snake_case : List[Any] = True
if state.get("""trim_offsets""" , lowerCAmelCase) != trim_offsets:
_snake_case : int = trim_offsets
_snake_case : Union[str, Any] = True
if changes_to_apply:
_snake_case : Dict = getattr(lowerCAmelCase , state.pop("""type"""))
_snake_case : Union[str, Any] = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def mask_token(self) -> str:
    """`str`: the mask token as a plain string, or ``None`` (with a logged
    error when ``self.verbose``) if it has not been set yet.

    NOTE(review): restored the property name -- the original getter was named
    ``UpperCamelCase_``, which left the ``@mask_token.setter`` decorator below
    referring to an undefined name.
    """
    if self._mask_token is None:
        if self.verbose:
            logger.error("Using mask_token, but it is not set yet.")
        return None
    return str(self._mask_token)


@mask_token.setter
def mask_token(self, value):
    """Set the mask token, wrapping a plain string in an ``AddedToken``.

    NOTE(review): the original passed its own obfuscated argument for both
    ``lstrip`` and ``rstrip``; ``lstrip=True, rstrip=False`` is the
    conventional mask-token behaviour (absorb the space to the left) --
    TODO confirm against the intended tokenizer.
    """
    value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
    self._mask_token = value
def UpperCamelCase_(self, *args, **kwargs) -> "BatchEncoding":
    """Batch-encode override that rejects pretokenized input unless the
    tokenizer was instantiated with ``add_prefix_space=True``.

    NOTE(review): the original declared ``*lowerCAmelCase, **lowerCAmelCase``
    (duplicate argument names -- a SyntaxError) and used an obfuscated value
    as the kwarg default; ``False`` is the natural default for this boolean
    flag.
    """
    is_split_into_words = kwargs.get("is_split_into_words", False)

    if is_split_into_words and not self.add_prefix_space:
        raise ValueError(
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

    return super()._batch_encode_plus(*args, **kwargs)
def UpperCamelCase_(self, *args, **kwargs) -> "BatchEncoding":
    """Single-sequence encode override mirroring the batch variant: forbids
    pretokenized input unless ``add_prefix_space=True``.

    NOTE(review): the original declared ``*lowerCAmelCase, **lowerCAmelCase``
    (duplicate argument names -- a SyntaxError); restored with conventional
    ``*args, **kwargs`` and a ``False`` default for the flag lookup.
    """
    is_split_into_words = kwargs.get("is_split_into_words", False)

    if is_split_into_words and not self.add_prefix_space:
        raise ValueError(
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

    return super()._encode_plus(*args, **kwargs)
def UpperCamelCase_(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """Save the backend tokenizer's model files into *save_directory* and
    return the written file paths as a tuple.

    NOTE(review): the original declared both parameters as ``lowerCAmelCase``
    (duplicate argument names -- a SyntaxError); the names follow the
    conventional fast-tokenizer ``save_vocabulary`` signature implied by the
    ``name=`` keyword in the body.
    """
    files = self._tokenizer.model.save(save_directory, name=filename_prefix)
    return tuple(files)
def UpperCamelCase_(self, token_ids_0, token_ids_1=None):
    """Add special tokens around one or two sequences:
    ``<bos> A <eos>`` or ``<bos> A <eos> <eos> B <eos>``.

    NOTE(review): the original declared both parameters as ``lowerCAmelCase``
    (duplicate argument names -- a SyntaxError) and referenced a single
    ``token_ids_a`` for both sequences; split into the two distinct id lists
    the two-branch body requires.
    """
    output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
    if token_ids_1 is None:
        return output
    # Pair of sequences: the second is wrapped in its own eos markers.
    return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_snake_case : str = [self.sep_token_id]
_snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 317 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ = logging.get_logger(__name__)

# NOTE(review): this second assignment rebinds `a__`, silently discarding the
# logger created above; the mapping was presumably meant for a distinct name
# (a pretrained-config archive map) -- TODO confirm the intended binding names.
a__ = {
    """microsoft/swin-tiny-patch4-window7-224""": (
        """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class snake_case(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin Transformer model usable as a backbone.

    NOTE(review): the original listed both base classes as the undefined name
    ``SCREAMING_SNAKE_CASE_``; ``BackboneConfigMixin`` and ``PretrainedConfig``
    are the two classes imported at the top of this section and the body uses
    machinery from both (``super().__init__(**kwargs)``, stage-name/out-feature
    bookkeeping).  The two class attributes below were both bound to
    ``snake_case_`` (the second silently overwrote the first); restored to the
    attribute names the ``PretrainedConfig`` machinery reads -- TODO confirm.
    """

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """NOTE(review): every parameter in the original signature was named
        ``lowerCAmelCase`` (duplicate argument names -- a SyntaxError).  Names
        are recovered from the attribute assignments in the body; defaults are
        taken positionally from the original signature."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class snake_case(OnnxConfig):
    """ONNX export configuration for Swin.

    NOTE(review): the base class in the original was the undefined name
    ``SCREAMING_SNAKE_CASE_``; ``OnnxConfig`` is the export base imported at
    the top of this section.  Both properties were named ``UpperCamelCase_``
    (the second silently shadowed the first); restored to the names the ONNX
    export machinery looks up.  This class also reuses the name ``snake_case``
    of the config class above (which it shadows) -- left unchanged to keep the
    external interface stable.
    """

    # Minimum torch release whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Axis layout of the single ``pixel_values`` input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 317 | 1 |
'''simple docstring'''
def _lowerCamelCase(head):
    """Check whether a singly linked list is a palindrome in O(1) extra space.

    Finds the middle with fast/slow pointers, reverses the second half in
    place, then compares the two halves node by node.  The list is left with
    its second half reversed, matching the original behaviour.

    NOTE(review): the original bound every value to a throwaway ``_a`` while
    reading ``fast``/``slow``/``second``/``node``/``nxt`` (all NameErrors);
    variable names are restored from those reads.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def _lowerCamelCase(head):
    """Check whether a singly linked list is a palindrome using a stack of the
    second half's values (O(n) extra space, list left intact).

    NOTE(review): the original bound every value to ``_a`` while reading
    ``fast``/``slow``/``cur``/``stack`` (all NameErrors); names restored.
    """
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def _lowerCamelCase(head):
    """Check whether a singly linked list is a palindrome by recording, for
    each value, the list of positions where it occurs and verifying that the
    positions of each value mirror around the centre (at most one value may
    occur an odd number of times).

    NOTE(review): the original bound every value to ``_a`` while reading
    ``d``/``pos``/``checksum``/``middle``/``step`` (all NameErrors); names
    restored from those reads.  The middle (odd-count) group's positions are
    not themselves checked -- behaviour of the restored original, preserved.
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 346 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# NOTE(review): later code in this module reads `client`, but the WebClient is
# bound to `lowerCAmelCase_` here -- TODO confirm the intended binding name.
lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def _lowerCamelCase(test_results):
    """Parse a pytest summary string into ``(n_failed, n_passed, time_spent)``.

    NOTE(review): the original bound every value to ``_a`` while reading
    ``expressions``/``failed``/``success``/``time_spent`` (all NameErrors);
    names restored from those reads, and the parameter renamed to match the
    ``test_results.split`` call in the body.
    """
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def _lowerCamelCase(failures_short_lines):
    """Map each failing doctest file to the first line of its error message.

    A line matching ``_ [doctest]`` opens an error section and carries the
    file name in its third whitespace-separated field; the first subsequent
    line that does not start with a line number is taken as the error text.

    NOTE(review): the original bound every value to ``_a`` while reading
    ``failures``/``file``/``in_error`` (all NameErrors); names restored, and
    the parameter renamed to match the ``failures_short_lines.split`` call.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class __SCREAMING_SNAKE_CASE :
    """Assembles and posts a Slack report (header / failure / success blocks)
    for a doc-test CI run, plus per-job reply threads.

    NOTE(review): this class is heavily damaged by an automated rename: every
    local/attribute assignment targets the throwaway name `_a` while the
    bodies read attributes (`self.title`, `self.n_success`, ...) and locals
    (`time_spent`, `total_secs`, `report`, `blocks`, ...) that are never
    bound; all methods share the name `UpperCamelCase__` (each definition
    shadows the previous); and `__init__` and the reply-block builder declare
    duplicate `__a` parameters (a SyntaxError).  Code left byte-identical;
    comments only.
    """

    def __init__( self : Tuple , __a : str , __a : Dict ):
        # NOTE(review): duplicate `__a` parameters; the body reads `title` and
        # `doc_test_results`, which this signature never binds.
        _a = title
        _a = doc_test_results["time_spent"].split("," )[0]
        _a = doc_test_results["success"]
        _a = doc_test_results["failures"]
        _a = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        _a = doc_test_results

    @property
    def UpperCamelCase__ ( self : int ):
        # Sums the per-suite "hh:mm:ss"-style durations into one "XhYmZs" string.
        _a = [self._time_spent]
        _a = 0
        for time in time_spent:
            _a = time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(__a ) == 1:
                _a = [0, 0, time_parts[0]]
            _a , _a , _a = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        _a , _a , _a = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return f'{int(__a )}h{int(__a )}m{int(__a )}s'

    @property
    def UpperCamelCase__ ( self : Optional[Any] ):
        # Slack "header" block carrying the report title.
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def UpperCamelCase__ ( self : Optional[Any] ):
        # Slack section shown when every test passed.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }

    @property
    def UpperCamelCase__ ( self : List[str] ):
        # Slack section summarising the failure/total counts and run time.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
                    f' {self.time}.'
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }

    @property
    def UpperCamelCase__ ( self : str ):
        # Builds a markdown section listing failing examples per category.
        _a = 40
        _a = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__a , __a )}
        _a = ""
        for category, failures in category_failures.items():
            if len(__a ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(__a )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f'The following examples had failures:\n\n\n{report}\n',
            },
        }

    @property
    def UpperCamelCase__ ( self : List[str] ):
        # Serialises the chosen blocks (header + failure or success sections).
        _a = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(__a )

    @staticmethod
    def UpperCamelCase__ ( ):
        # Posts a generic "the run itself broke" message to Slack.
        _a = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(__a )} ) )
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )

    def UpperCamelCase__ ( self : Tuple ):
        # Posts the main report message and keeps its timestamp for threading.
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(self.payload )} ) )
        _a = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
        _a = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__a , )

    def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] , __a : Tuple , __a : int ):
        # NOTE(review): four duplicate `__a` parameters (SyntaxError); body
        # reads `failures`, `job_name`, `job_link`, `title` and `text`.
        _a = ""
        for key, value in failures.items():
            _a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value
            failures_text += f'*{key}*\n_{value}_\n\n'
        _a = job_name
        _a = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            _a = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def UpperCamelCase__ ( self : str ):
        # Posts one threaded reply per failing job under the main message.
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made." )
        _a = self.doc_test_results.pop("job_link" )
        self.doc_test_results.pop("failures" )
        self.doc_test_results.pop("success" )
        self.doc_test_results.pop("time_spent" )
        _a = sorted(self.doc_test_results.items() , key=lambda __a : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["failures"] ):
                _a = f'*Num failures* :{len(job_result["failed"] )} \n'
                _a = job_result["failures"]
                _a = self.get_reply_blocks(__a , __a , __a , text=__a )
                print("Sending the following reply" )
                print(json.dumps({"blocks": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=__a , thread_ts=self.thread_ts["ts"] , )
                time.sleep(1 )
def _lowerCamelCase():
    """Fetch ``{job name: html_url}`` for all jobs of the current GitHub
    Actions run, paging past the first 100 results.

    NOTE(review): the original bound every value to ``_a`` while reading
    ``run_id``/``url``/``result``/``jobs`` and a page count (all NameErrors);
    names restored from those reads.  Returns ``{}`` on any fetch error.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first request returned up to 100 jobs; fetch any remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def _lowerCamelCase(lowercase):
    """Read every UTF-8 text file directly inside directory *lowercase* into a
    dict keyed by the file name's stem (text before the first dot).

    Returns an empty dict when the path does not exist.  Raises ``ValueError``
    (chained from the decode error) on undecodable files.

    NOTE(review): the original bound every value to ``_a`` and passed the
    directory for both arguments of ``os.path.join`` (the second must be the
    file name); names restored from the reads in the body.
    """
    _artifact = {}

    if os.path.exists(lowercase):
        files = os.listdir(lowercase)
        for file in files:
            try:
                with open(os.path.join(lowercase, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(lowercase, file)}.") from e

    return _artifact
def _lowerCamelCase():
    """Scan the current working directory and return ``{dir name: Artifact}``
    for every sub-directory, where each ``Artifact`` records its paths.

    NOTE(review): the original bound every value to ``_a`` while reading
    ``_available_artifacts``/``directories``/``artifact_name`` (NameErrors),
    named the path-adding method ``UpperCamelCase__`` while calling it as
    ``add_path``, and passed an undefined ``lowercase`` to it; restored from
    those reads so the block is self-consistent.
    """

    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            # Each recorded path carries the owning artifact's name.
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): this script is damaged by an automated rename -- almost
    # every assignment binds `lowerCAmelCase_` while the expressions read
    # `github_actions_job_links`, `available_artifacts`, `docs`,
    # `doc_test_results`, `artifact_path`, `artifact`, `failed`, `success`,
    # `time_spent`, `all_failures`, `file_path`, `test`, `category`,
    # `failure` and `message`, none of which are ever defined; and
    # `get_job_links`, `retrieve_available_artifacts`, `retrieve_artifact`,
    # `extract_first_line_failure`, `handle_test_results` and `Message` do
    # not exist under those names in this module.  It raises NameError as
    # written; code left byte-identical pending a naming decision.
    lowerCAmelCase_ : List[Any] = get_job_links()
    lowerCAmelCase_ : Any = retrieve_available_artifacts()

    lowerCAmelCase_ : List[str] = collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    lowerCAmelCase_ : Optional[Any] = {
        v: {
            'failed': [],
            'failures': {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    lowerCAmelCase_ : int = github_actions_job_links.get('run_doctests')

    lowerCAmelCase_ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    lowerCAmelCase_ : List[str] = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = handle_test_results(artifact['stats'])
        lowerCAmelCase_ : List[str] = failed
        lowerCAmelCase_ : Optional[Any] = success
        lowerCAmelCase_ : Tuple = time_spent[1:-1] + ', '

        lowerCAmelCase_ : List[Any] = extract_first_line_failure(artifact['failures_short'])
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                lowerCAmelCase_ : int = line.replace('FAILED ', '')
                lowerCAmelCase_ : Optional[int] = line.split()[0].replace('\n', '')

                if "::" in line:
                    lowerCAmelCase_ , lowerCAmelCase_ : str = line.split('::')
                else:
                    lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        lowerCAmelCase_ : Union[str, Any] = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        lowerCAmelCase_ : List[str] = all_failures[test] if test in all_failures else 'N/A'
                        lowerCAmelCase_ : Optional[Any] = failure
                        break

    lowerCAmelCase_ : Tuple = Message('🤗 Results of the doc tests.', doc_test_results)
    message.post()
    message.post_reply()
| 346 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
UpperCAmelCase__ : List[str] = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
# NOTE(review): this rebinds `UpperCAmelCase__`, silently discarding the
# glider grid above; the __main__ guard below reads `GLIDER`, which is never
# defined -- TODO restore distinct GLIDER/BLINKER names.
UpperCAmelCase__ : Optional[int] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowercase_(cells):
    """Compute the next Game of Life generation for a 2-D grid of 0/1 ints.

    A live cell (1) survives with two or three live neighbours; a dead cell
    (0) becomes alive with exactly three; everything else is dead.

    NOTE(review): the original bound every value to ``SCREAMING_SNAKE_CASE__``
    while reading ``next_generation``/``next_generation_row``/
    ``neighbour_count``/``alive`` and the parameter as ``cells`` (all
    NameErrors); names restored from those reads.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def lowercase_(cells, frames):
    """Render *frames* successive Game of Life generations of *cells* as PIL
    greyscale-ish RGB images (live cells black, dead cells white).

    NOTE(review): the original declared both parameters as ``_snake_case``
    (duplicate argument names -- a SyntaxError) and bound every value to
    ``SCREAMING_SNAKE_CASE__`` while reading ``images``/``img``/``pixels``/
    ``colour`` (NameErrors); names restored from those reads.  The body calls
    ``new_generation``, which is not defined under that name in this module
    (the stepping function above is also ``lowercase_``) -- left as-is pending
    a naming decision.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    # NOTE(review): `generate_images`, `GLIDER` and `images` are not defined
    # under these names anywhere in this module -- this guard raises NameError
    # as written; left unchanged pending a naming decision.
    UpperCAmelCase__ : Union[str, Any] = generate_images(GLIDER, 1_6)
    images[0].save('out.gif', save_all=True, append_images=images[1:])
| 25 |
'''simple docstring'''
from __future__ import annotations
def _A(limit):
    """Return all primes strictly below *limit* using an odd-only sieve of
    Eratosthenes (evens other than 2 are never inspected).

    NOTE(review): the original bound every value to ``__lowercase`` while
    reading ``is_prime``/``index``/``primes`` and the parameter as ``limit``
    (all NameErrors); names restored from those reads.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        # Mark multiples of i starting at 2*i (even slots are never read).
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def _A(ceiling=1_000_000):
    """Project Euler 50: find the prime below *ceiling* expressible as the sum
    of the most consecutive primes.

    NOTE(review): the original parameter slot was ``_lowerCAmelCase`` while
    the body read ``ceiling`` and bound every value to ``__lowercase`` while
    reading ``primes``/``length``/``largest``/``sol`` (NameErrors); names
    restored from those reads.  The body calls ``prime_sieve``, which is not
    defined under that name in this module (the sieve above is also named
    ``_A`` and is shadowed by this definition) -- left as-is pending a naming
    decision.
    """
    primes = prime_sieve(ceiling)

    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    # NOTE(review): `solution` is undefined in this module; this print raises
    # NameError as written.
    print(f"{solution() = }")
| 166 | 0 |
'''simple docstring'''
from torch import nn
def UpperCamelCase(act_fn):
    """Instantiate the torch activation module named by *act_fn*.

    Accepts "swish"/"silu" (both SiLU), "mish", or "gelu"; any other value
    raises ``ValueError``.  A fresh module instance is returned on each call.

    NOTE(review): the original declared its parameter as ``UpperCAmelCase_``
    while the body read ``act_fn`` (a NameError); parameter renamed to match
    the body.
    """
    if act_fn in ("swish", "silu"):
        return nn.SiLU()
    if act_fn == "mish":
        return nn.Mish()
    if act_fn == "gelu":
        return nn.GELU()
    raise ValueError(f"Unsupported activation function: {act_fn}")
| 280 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def UpperCamelCase():
    """Split raw DPR training records into an evaluation-set file (one question
    per line) and a gold-data file (tab-joined positive context titles).

    NOTE(review): the original bound every value to ``UpperCAmelCase`` while
    reading ``parser``/``args``/``dpr_records``/``question``/``contexts``
    (NameErrors) and passed an obfuscated value for ``type=``; ``str`` is the
    natural argparse type for these path arguments.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    # Fixed: the guard previously called `main()`, which does not exist in
    # this module; the entry point defined above is `UpperCamelCase`.
    UpperCamelCase()
| 280 | 1 |
"""simple docstring"""
from __future__ import annotations
def __a(voltage, current, resistance):
    """Apply Ohm's law: given exactly one zero among voltage, current and
    resistance, return a dict with the missing quantity computed from the
    other two.

    Raises ``ValueError`` unless exactly one argument is 0, or if resistance
    is negative.

    NOTE(review): the original declared all three parameters as
    ``__lowerCamelCase`` (duplicate argument names -- a SyntaxError); the body
    consistently reads ``voltage``/``current``/``resistance``, so those names
    are restored in the signature.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 61 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
    """Map an LDM VAE state dict onto the diffusers AutoencoderKL layout.

    NOTE(review): this function is damaged by an automated rename -- both
    parameters are declared ``__lowerCamelCase`` (duplicate argument names, a
    SyntaxError), and every assignment targets ``UpperCAmelCase_`` while the
    body reads ``checkpoint``, ``vae_state_dict``, ``new_checkpoint``,
    ``num_down_blocks``, ``down_blocks``, ``num_up_blocks``, ``up_blocks``,
    ``resnets``, ``paths``, ``meta_path``, ``mid_resnets``,
    ``num_mid_res_blocks``, ``mid_attentions`` and ``block_id``, none of which
    are ever bound.  Code left byte-identical; comments only.
    """
    UpperCAmelCase_ : str = checkpoint
    UpperCAmelCase_ : int = {}
    # Straight copies of the encoder/decoder stem and quant conv weights.
    UpperCAmelCase_ : Optional[int] = vae_state_dict["encoder.conv_in.weight"]
    UpperCAmelCase_ : List[str] = vae_state_dict["encoder.conv_in.bias"]
    UpperCAmelCase_ : Optional[Any] = vae_state_dict["encoder.conv_out.weight"]
    UpperCAmelCase_ : Optional[int] = vae_state_dict["encoder.conv_out.bias"]
    UpperCAmelCase_ : List[Any] = vae_state_dict["encoder.norm_out.weight"]
    UpperCAmelCase_ : Union[str, Any] = vae_state_dict["encoder.norm_out.bias"]
    UpperCAmelCase_ : Any = vae_state_dict["decoder.conv_in.weight"]
    UpperCAmelCase_ : int = vae_state_dict["decoder.conv_in.bias"]
    UpperCAmelCase_ : Any = vae_state_dict["decoder.conv_out.weight"]
    UpperCAmelCase_ : Tuple = vae_state_dict["decoder.conv_out.bias"]
    UpperCAmelCase_ : List[Any] = vae_state_dict["decoder.norm_out.weight"]
    UpperCAmelCase_ : Tuple = vae_state_dict["decoder.norm_out.bias"]
    UpperCAmelCase_ : str = vae_state_dict["quant_conv.weight"]
    UpperCAmelCase_ : Optional[Any] = vae_state_dict["quant_conv.bias"]
    UpperCAmelCase_ : List[str] = vae_state_dict["post_quant_conv.weight"]
    UpperCAmelCase_ : List[Any] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    UpperCAmelCase_ : Optional[Any] = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    UpperCAmelCase_ : Optional[Any] = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
    }
    # Retrieves the keys for the decoder up blocks only
    UpperCAmelCase_ : Dict = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    UpperCAmelCase_ : Optional[int] = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
    }
    # Encoder down blocks: remap resnets and pop downsampler convs.
    for i in range(__lowerCamelCase ):
        UpperCAmelCase_ : Any = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            UpperCAmelCase_ : Dict = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""" )
            UpperCAmelCase_ : Dict = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""" )
        UpperCAmelCase_ : List[str] = renew_vae_resnet_paths(__lowerCamelCase )
        UpperCAmelCase_ : Optional[Any] = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
    # Encoder mid block: two resnets plus one attention.
    UpperCAmelCase_ : List[Any] = [key for key in vae_state_dict if "encoder.mid.block" in key]
    UpperCAmelCase_ : Tuple = 2
    for i in range(1, num_mid_res_blocks + 1 ):
        UpperCAmelCase_ : str = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
        UpperCAmelCase_ : List[Any] = renew_vae_resnet_paths(__lowerCamelCase )
        UpperCAmelCase_ : Optional[int] = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
    UpperCAmelCase_ : Optional[int] = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    UpperCAmelCase_ : Union[str, Any] = renew_vae_attention_paths(__lowerCamelCase )
    UpperCAmelCase_ : int = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
    conv_attn_to_linear(__lowerCamelCase )
    # Decoder up blocks: note the reversed block index.
    for i in range(__lowerCamelCase ):
        UpperCAmelCase_ : Optional[Any] = num_up_blocks - 1 - i
        UpperCAmelCase_ : Any = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]
        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            UpperCAmelCase_ : str = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            UpperCAmelCase_ : Optional[Any] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]
        UpperCAmelCase_ : Dict = renew_vae_resnet_paths(__lowerCamelCase )
        UpperCAmelCase_ : List[str] = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
    # Decoder mid block mirrors the encoder's.
    UpperCAmelCase_ : Optional[Any] = [key for key in vae_state_dict if "decoder.mid.block" in key]
    UpperCAmelCase_ : List[Any] = 2
    for i in range(1, num_mid_res_blocks + 1 ):
        UpperCAmelCase_ : str = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
        UpperCAmelCase_ : Tuple = renew_vae_resnet_paths(__lowerCamelCase )
        UpperCAmelCase_ : Tuple = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
    UpperCAmelCase_ : Optional[int] = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    UpperCAmelCase_ : Any = renew_vae_attention_paths(__lowerCamelCase )
    UpperCAmelCase_ : Union[str, Any] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
    conv_attn_to_linear(__lowerCamelCase )
    return new_checkpoint
def __a(vae_pt_path, dump_path):
    """Convert a Stable Diffusion v1 VAE checkpoint (.pt or .safetensors) to a
    diffusers ``AutoencoderKL`` and save it under *dump_path*.

    NOTE(review): the original declared both parameters as
    ``__lowerCamelCase`` (duplicate argument names -- a SyntaxError) and bound
    every value to ``UpperCAmelCase_``; local names are restored from the
    reads in the body.  The body also calls
    ``custom_convert_ldm_vae_checkpoint``, which is not defined under that
    name in this module (the converter above is also named ``__a``) -- left
    as-is pending a naming decision.
    """
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)

    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"

    if vae_pt_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(vae_pt_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(vae_pt_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(dump_path)
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `_a` while the add_argument calls
    # read `parser`, the parsed namespace is bound to `_a` while the call
    # reads `args`, and `vae_pt_to_vae_diffuser` is not defined under that
    # name in this module (the converter above is `__a`) -- this guard raises
    # NameError as written.
    _a = argparse.ArgumentParser()
    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    _a = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 61 | 1 |
def UpperCamelCase(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative ints as a zero-padded binary
    string prefixed with "0b".

    Raises ``ValueError`` if either input is negative.

    NOTE(review): the original declared both parameters as ``snake_case__``
    (duplicate argument names -- a SyntaxError) and bound the binary strings
    to obfuscated names while reading ``a_binary``/``b_binary``; names
    restored from the reads in the body.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 103 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCAmelCase = abspath(join(dirname(dirname(dirname(__file__))), "src"))
# Fixed: `sys.path.insert` previously read `git_repo_path`, a name that is
# never defined in this module; use the repo path computed above instead.
sys.path.insert(1, __UpperCAmelCase)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCamelCase(snake_case__: Dict) -> List[str]:
    """pytest hook: register transformers' shared command-line options on the
    given option parser."""
    # Imported lazily so merely importing this conftest does not require
    # transformers until pytest actually invokes the hook.
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(snake_case__)
def UpperCamelCase(terminalreporter) -> Dict:
    """pytest hook: emit transformers' custom terminal reports when the
    ``--make-reports`` option is set.

    NOTE(review): the parameter must be named ``terminalreporter`` for
    pytest's keyword-based hook dispatch, and the body already read that name
    while the original signature declared ``snake_case__``; the fetched option
    value was also bound to a module-shadowing ``UpperCamelCase`` while the
    guard read ``make_reports`` -- both restored here.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 103 | 1 |
import csv
import tweepy
# Twitter API credentials
A_ :int = ''''''
A_ :str = ''''''
A_ :Dict = ''''''
A_ :Optional[int] = ''''''
def A ( a_ , * , consumer_key="" , consumer_secret="" , access_key="" , access_secret="" ) -> None:
    """Download a user's recent tweets to ``new_<name>_tweets.csv``.

    ``a_`` is the Twitter screen name. API credentials are keyword-only and
    default to empty strings; pass real keys to authenticate.

    Fixes vs. the original: auth was built from the screen name passed twice
    (never from credentials); the loop condition tested ``len(a_)`` (the screen
    name, always truthy -> infinite loop) instead of the last batch; the CSV
    filename and progress print referenced undefined names. Also handles the
    zero-tweet case, which previously crashed on ``alltweets[-1]``.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=a_ , count=200 )
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        alltweets.extend(new_tweets )
        # save the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f'...{len(alltweets )} tweets downloaded so far' )
        print(f'getting tweets before {oldest}' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=a_ , count=200 , max_id=oldest )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f'new_{a_}_tweets.csv' , 'w' ) as f:
        writer = csv.writer(f )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(outtweets )
if __name__ == "__main__":
    # pass in the username of the account you want to download
    # Fix: the downloader above is named `A` in this file; `get_all_tweets`
    # did not exist and raised NameError.
    A('''FirePing32''')
| 71 | """simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
# NOTE(review): mangled block. Both assignments in ``__init__`` bind a
# throwaway local called ``snake_case`` (there is no ``self.`` target), and
# ``data`` does not exist -- the parameter is ``lowerCAmelCase`` -- so
# instantiation raises NameError. This appears to be a singly-linked-list
# Node that originally stored ``self.data`` and ``self.next``; confirm
# against the upstream source before relying on it.
class lowerCAmelCase_ :
    """Linked-list node payload holder (broken as written; see NOTE above)."""

    def __init__( self , lowerCAmelCase ):
        """Intended to store the payload and a null successor (see NOTE above)."""
        snake_case = data
        snake_case = None
# NOTE(review): heavily mangled circular singly-linked list. Every local is
# rebound to the single name ``snake_case`` (so ``self.head``/``self.tail``
# are never actually set), the bodies reference names that do not exist here
# (``node``, ``index``, ``new_node``, ``temp``, ``delete_node``, ``Node``),
# all mutator methods share the name ``snake_case`` (later defs shadow
# earlier ones), and the two-argument insert at the marked line repeats the
# parameter name ``lowerCAmelCase`` twice, which is a SyntaxError. The code
# is kept byte-identical below; restore distinct names from the upstream
# circular-linked-list implementation before use.
class lowerCAmelCase_ :
    """Circular singly-linked list (broken as written; see NOTE above)."""

    def __init__( self ):
        """Intended to create an empty list with null head and tail."""
        snake_case = None
        snake_case = None
    def __iter__( self ):
        """Intended to yield each payload once, stopping when back at head."""
        snake_case = self.head
        while self.head:
            yield node.data
            snake_case = node.next
            if node == self.head:
                break
    def __len__( self ):
        """Number of nodes, counted by exhausting the iterator."""
        return sum(1 for _ in self )
    def __repr__( self ):
        """Intended to render payloads joined by '->'."""
        return "->".join(str(lowerCAmelCase ) for item in iter(self ) )
    def snake_case ( self , lowerCAmelCase ):
        """Intended tail insert (delegates to the indexed insert)."""
        self.insert_nth(len(self ) , lowerCAmelCase )
    def snake_case ( self , lowerCAmelCase ):
        """Intended head insert (delegates to the indexed insert)."""
        self.insert_nth(0 , lowerCAmelCase )
    # NOTE(review): duplicate parameter name below -> SyntaxError.
    def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
        """Intended insert of a value at a given index."""
        if index < 0 or index > len(self ):
            raise IndexError('list index out of range.' )
        snake_case = Node(lowerCAmelCase )
        if self.head is None:
            snake_case = new_node # first node points itself
            snake_case = snake_case = new_node
        elif index == 0: # insert at head
            snake_case = self.head
            snake_case = snake_case = new_node
        else:
            snake_case = self.head
            for _ in range(index - 1 ):
                snake_case = temp.next
            snake_case = temp.next
            snake_case = new_node
            if index == len(self ) - 1: # insert at tail
                snake_case = new_node
    def snake_case ( self ):
        """Intended head delete."""
        return self.delete_nth(0 )
    def snake_case ( self ):
        """Intended tail delete."""
        return self.delete_nth(len(self ) - 1 )
    def snake_case ( self , lowerCAmelCase = 0 ):
        """Intended delete-at-index, returning the removed payload."""
        if not 0 <= index < len(self ):
            raise IndexError('list index out of range.' )
        snake_case = self.head
        if self.head == self.tail: # just one node
            snake_case = snake_case = None
        elif index == 0: # delete head node
            snake_case = self.tail.next.next
            snake_case = self.head.next
        else:
            snake_case = self.head
            for _ in range(index - 1 ):
                snake_case = temp.next
            snake_case = temp.next
            snake_case = temp.next.next
            if index == len(self ) - 1: # delete at tail
                snake_case = temp
        return delete_node.data
    def snake_case ( self ):
        """True when the list holds no nodes."""
        return len(self ) == 0
# NOTE(review): mangled self-test. The list instance is bound to a throwaway
# local ``snake_case`` yet exercised as ``circular_linked_list``, and both
# ``CircularLinkedList`` and ``_UpperCamelCase`` are unresolved names in this
# file (the class above is named ``lowerCAmelCase_``), so this function
# raises NameError immediately. Kept byte-identical; it documents the
# intended contract of the list (empty behavior, inserts, deletes, repr).
def lowerCAmelCase__ ( ) -> None:
    """Exercise the circular linked list end-to-end (broken as written; see NOTE)."""
    snake_case = CircularLinkedList()
    assert len(_UpperCamelCase ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(_UpperCamelCase ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError # This should not happen
    except IndexError:
        assert True # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError # This should not happen
    except IndexError:
        assert True # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(_UpperCamelCase ) == i
        circular_linked_list.insert_nth(_UpperCamelCase , i + 1 )
    assert str(_UpperCamelCase ) == "->".join(str(_UpperCamelCase ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(_UpperCamelCase ) == "->".join(str(_UpperCamelCase ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(_UpperCamelCase ) == "->".join(str(_UpperCamelCase ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(_UpperCamelCase ) == "->".join(str(_UpperCamelCase ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(_UpperCamelCase ) == "->".join(str(_UpperCamelCase ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
# Script entry point: run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 150 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# NOTE(review): mangled HTML-node feature extractor (built on BeautifulSoup).
# Throughout, locals are rebound to the single name ``UpperCamelCase`` and
# then read under their intended names (``xpath_tags``, ``child``,
# ``html_code``, ``xpath``, ``valid_strings``, ``html_strings``, ``nodes``,
# ``encoded_inputs``, ...), which do not exist -- every method raises
# NameError when called. The base class ``__lowerCAmelCase`` is also an
# unresolved mangled name. Kept byte-identical; restore distinct names
# before use.
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    def __init__( self : Dict , **lowerCamelCase_ : Any ):
        """Require the bs4 backend, then defer to the base feature extractor."""
        requires_backends(self , ["""bs4"""] )
        super().__init__(**lowerCamelCase_ )
    def lowerCamelCase_ ( self : str , lowerCamelCase_ : Union[str, Any] ):
        """Intended: walk a bs4 element's ancestors collecting tag names and sibling indices (broken; see NOTE)."""
        UpperCamelCase = []
        UpperCamelCase = []
        UpperCamelCase = element if element.name else element.parent
        for parent in child.parents: # type: bs4.element.Tag
            UpperCamelCase = parent.find_all(child.name , recursive=lowerCamelCase_ )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(lowerCamelCase_ ) else next(i for i, s in enumerate(lowerCamelCase_ , 1 ) if s is child ) )
            UpperCamelCase = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] ):
        """Intended: parse HTML and return (text strings, xpath tag seqs, xpath index seqs) (broken; see NOTE)."""
        UpperCamelCase = BeautifulSoup(lowerCamelCase_ , """html.parser""" )
        UpperCamelCase = []
        UpperCamelCase = []
        UpperCamelCase = []
        for element in html_code.descendants:
            if type(lowerCamelCase_ ) == bsa.element.NavigableString:
                if type(element.parent ) != bsa.element.Tag:
                    continue
                UpperCamelCase = html.unescape(lowerCamelCase_ ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(lowerCamelCase_ )
                UpperCamelCase , UpperCamelCase = self.xpath_soup(lowerCamelCase_ )
                stringaxtag_seq.append(lowerCamelCase_ )
                stringaxsubs_seq.append(lowerCamelCase_ )
        if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
            raise ValueError("""Number of doc strings and xtags does not correspond""" )
        if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
            raise ValueError("""Number of doc strings and xsubs does not correspond""" )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple ):
        """Intended: join tag/subscript pairs into an '/tag[idx]' xpath string (broken; see NOTE)."""
        UpperCamelCase = """"""
        for tagname, subs in zip(lowerCamelCase_ , lowerCamelCase_ ):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath
    def __call__( self : str , lowerCamelCase_ : Union[str, Any] ):
        """Intended: accept one HTML string or a batch and return a BatchFeature of nodes + xpaths (broken; see NOTE)."""
        UpperCamelCase = False
        # Check that strings has a valid type
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            UpperCamelCase = True
        elif isinstance(lowerCamelCase_ , (list, tuple) ):
            if len(lowerCamelCase_ ) == 0 or isinstance(html_strings[0] , lowerCamelCase_ ):
                UpperCamelCase = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must of type `str`, `List[str]` (batch of examples), """
                f"""but is of type {type(lowerCamelCase_ )}.""" )
        UpperCamelCase = bool(isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(html_strings[0] , lowerCamelCase_ )) )
        if not is_batched:
            UpperCamelCase = [html_strings]
        # Get nodes + xpaths
        UpperCamelCase = []
        UpperCamelCase = []
        for html_string in html_strings:
            UpperCamelCase , UpperCamelCase , UpperCamelCase = self.get_three_from_single(lowerCamelCase_ )
            nodes.append(lowerCamelCase_ )
            UpperCamelCase = []
            for node, tag_list, sub_list in zip(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
                UpperCamelCase = self.construct_xpath(lowerCamelCase_ , lowerCamelCase_ )
                xpath_strings.append(lowerCamelCase_ )
            xpaths.append(lowerCamelCase_ )
        # return as Dict
        UpperCamelCase = {"""nodes""": nodes, """xpaths""": xpaths}
        UpperCamelCase = BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
        return encoded_inputs
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths # type: ignore

# Repository hygiene check: list files whose names contain uppercase letters,
# spaces, or hyphens, or that live outside any directory; exit non-zero with
# the offending count so CI fails.
# Fix: every list was assigned to one mangled module name and then read under
# its intended name (`filepaths`, `upper_files`, ...), raising NameError.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F'''{len(upper_files)} files contain uppercase characters:''')
    print("""\n""".join(upper_files) + """\n""")

space_files = [file for file in filepaths if """ """ in file]
if space_files:
    print(F'''{len(space_files)} files contain space characters:''')
    print("""\n""".join(space_files) + """\n""")

hyphen_files = [file for file in filepaths if """-""" in file]
if hyphen_files:
    print(F'''{len(hyphen_files)} files contain hyphen characters:''')
    print("""\n""".join(hyphen_files) + """\n""")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F'''{len(nodir_files)} files are not in a directory:''')
    print("""\n""".join(nodir_files) + """\n""")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 165 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
    """Decorator factory: append the single key ``lowercase`` to the decorated
    function's ``handle_key`` list attribute (creating it if absent) and return
    the function unchanged.

    Fix: the original inner decorator assigned the attribute list to a mangled
    throwaway name and then used undefined ``handle``/``key`` names, raising
    NameError on use.
    """
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += [lowercase]
        setattr(func , """handle_key""" , handle )
        return func

    return decorator
def SCREAMING_SNAKE_CASE__ ( *lowercase ) -> List[str]:
    """Decorator factory: append every key in ``*lowercase`` to the decorated
    function's ``handle_key`` list attribute (creating it if absent) and return
    the function unchanged.

    Fix: the original inner decorator assigned the attribute list to a mangled
    throwaway name and then used undefined ``handle``/``keys`` names, raising
    NameError on use.
    """
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += lowercase
        setattr(func , """handle_key""" , handle )
        return func

    return decorator
# NOTE(review): mangled key-handler metaclass. ``__new__`` repeats the
# parameter name ``A`` three times (a SyntaxError); locals are bound to
# ``snake_case`` and read under intended names (``new_cls``, ``attrs``,
# ``handled_keys``, ``char``, ``handler``); ``KeyHandler`` and the base
# ``UpperCamelCase__`` are unresolved names here. Kept byte-identical;
# restore distinct names before use.
class __lowercase (UpperCamelCase__ ):
    """Metaclass intended to collect methods' ``handle_key`` registrations into a
    per-class ``key_handler`` dict and install a shared ``handle_input`` (broken
    as written; see NOTE)."""
    def __new__( cls , A , A , A ) -> str:
        snake_case : int = super().__new__(cls , A , A , A )
        if not hasattr(A , """key_handler""" ):
            setattr(A , """key_handler""" , {} )
        setattr(A , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            snake_case : Dict = getattr(A , """handle_key""" , [] )
            for key in handled_keys:
                snake_case : Any = value
        return new_cls
    @staticmethod
    def UpperCAmelCase ( cls ) -> List[str]:
        # Intended: read one character, look up its registered handler, invoke it.
        snake_case : Tuple = get_character()
        if char != KEYMAP["undefined"]:
            snake_case : str = ord(A )
            snake_case : Optional[Any] = cls.key_handler.get(A )
            if handler:
                snake_case : Optional[Any] = char
                return handler(cls )
            else:
                return None
# NOTE(review): intended to rebuild ``cls`` under the key-handler metaclass,
# but ``KeyHandler`` is an unresolved name here (the metaclass above is named
# ``__lowercase``), so this raises NameError when called.
def SCREAMING_SNAKE_CASE__ ( cls ) -> Tuple:
    """Re-create *cls* via the key-handler metaclass (broken as written; see NOTE)."""
    return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
| 124 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=3 , A=3_0 , A=4_0_0 , A=True , A=None , A=0.9 , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , ) -> Dict:
snake_case : Optional[int] = size if size is not None else {"""shortest_edge""": 3_0}
snake_case : Optional[int] = crop_size if crop_size is not None else {"""height""": 3_0, """width""": 3_0}
snake_case : int = parent
snake_case : List[str] = batch_size
snake_case : Any = num_channels
snake_case : Optional[Any] = min_resolution
snake_case : Any = max_resolution
snake_case : Dict = do_resize_and_center_crop
snake_case : Any = size
snake_case : List[Any] = crop_pct
snake_case : int = crop_size
snake_case : int = do_normalize
snake_case : List[Any] = image_mean
snake_case : Tuple = image_std
def UpperCAmelCase ( self ) -> int:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
# NOTE(review): mangled PoolFormer image-processor test suite. Every test
# method is named ``UpperCAmelCase`` (later defs shadow earlier ones, so only
# the last survives); locals are bound to ``snake_case`` and read under the
# intended names (``image_processor``, ``image_inputs``, ``image_processing``,
# ``encoded_images``); ``A`` and ``PoolFormerImageProcessingTester`` are
# unresolved names (the tester class above is named ``__lowercase``); and the
# base ``UpperCamelCase__`` is unresolved. Kept byte-identical; restore
# distinct names before use.
@require_torch
@require_vision
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
    """PoolFormer image-processor tests (broken as written; see NOTE above)."""
    _snake_case = PoolFormerImageProcessor if is_vision_available() else None
    def UpperCAmelCase ( self ) -> Optional[Any]:
        # Intended setUp: build the config tester (result is discarded here).
        snake_case : str = PoolFormerImageProcessingTester(self )
    @property
    def UpperCAmelCase ( self ) -> Tuple:
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCAmelCase ( self ) -> Dict:
        # Intended: the processor exposes all configuration attributes.
        snake_case : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A , """do_resize_and_center_crop""" ) )
        self.assertTrue(hasattr(A , """size""" ) )
        self.assertTrue(hasattr(A , """crop_pct""" ) )
        self.assertTrue(hasattr(A , """do_normalize""" ) )
        self.assertTrue(hasattr(A , """image_mean""" ) )
        self.assertTrue(hasattr(A , """image_std""" ) )
    def UpperCAmelCase ( self ) -> int:
        # Intended: from_dict honors defaults and keyword overrides.
        snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 3_0} )
        self.assertEqual(image_processor.crop_size , {"""height""": 3_0, """width""": 3_0} )
        snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
        self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
    def UpperCAmelCase ( self ) -> Tuple:
        pass
    def UpperCAmelCase ( self ) -> List[Any]:
        # Initialize image_processing
        snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A , Image.Image )
        # Test not batched input
        snake_case : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        snake_case : Tuple = image_processing(A , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def UpperCAmelCase ( self ) -> Dict:
        # Initialize image_processing
        snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A , np.ndarray )
        # Test not batched input
        snake_case : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        snake_case : Any = image_processing(A , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def UpperCAmelCase ( self ) -> List[str]:
        # Initialize image_processing
        snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A , torch.Tensor )
        # Test not batched input
        snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        snake_case : int = image_processing(A , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 124 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class _lowerCAmelCase( a_ ):
    """A pile whose ordering is defined by its top (last) element.

    NOTE(review): the base class ``a_`` is an unresolved (mangled) name in
    this file -- upstream this is a ``list`` subclass; confirm before use.

    Fix: both comparison methods referenced an undefined ``other`` while the
    parameter was named ``_lowerCamelCase``.
    """
    def __lt__( self , _lowerCamelCase ):
        # Piles compare by their top element; @total_ordering derives the rest.
        return self[-1] < _lowerCamelCase[-1]
    def __eq__( self , _lowerCamelCase ):
        return self[-1] == _lowerCamelCase[-1]
def snake_case (UpperCAmelCase__ ) -> Tuple:
    """Sort ``UpperCAmelCase__`` in place via patience sorting and return it.

    Each element is dealt onto the leftmost pile whose top can accept it
    (located with ``bisect_left``); the piles are then combined with a
    heap-based merge.

    NOTE(review): depends on the ``_lowerCAmelCase`` pile class above, whose
    base class name is itself mangled, so the module needs that fix too.

    Fix: the original bound every intermediate to one mangled local and read
    ``stacks``/``lowerCamelCase__``/``collection``, which do not exist.
    """
    piles: list = []
    # sort into piles
    for element in UpperCAmelCase__:
        candidate = _lowerCAmelCase([element] )
        index = bisect_left(piles , candidate )
        if index != len(piles ):
            piles[index].append(element )
        else:
            piles.append(candidate )
    # use a heap-based merge to combine the piles efficiently
    UpperCAmelCase__[:] = merge(*(reversed(pile ) for pile in piles ) )
    return UpperCAmelCase__
if __name__ == "__main__":
    # Fix: both input lines were assigned to the same mangled constant and the
    # sort was called under the nonexistent name `patience_sort`.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(snake_case(unsorted))
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def snake_case (UpperCAmelCase__ ) -> tuple:
    """Split a scikit-learn dataset mapping into ``(features, targets)``.

    Fix: the body read an undefined ``data`` name; the parameter is
    ``UpperCAmelCase__`` (mangled name kept for interface stability).
    """
    return (UpperCAmelCase__["data"], UpperCAmelCase__["target"])
def snake_case (features , target , test_features ) -> np.ndarray:
    """Fit an XGBoost regressor on (features, target) and predict test_features.

    Returns the predictions as a column vector of shape (len(test_features), 1).

    Fix: the original signature repeated one mangled parameter name three
    times (a SyntaxError) and the body read undefined ``xgb``/``predictions``
    names.
    """
    model = XGBRegressor(verbosity=0 , random_state=4_2 )
    model.fit(features , target )
    # Predict target for test data
    predictions = model.predict(test_features )
    return predictions.reshape(len(test_features ) , 1 )
# NOTE(review): mangled driver. Results are bound to throwaway
# ``UpperCamelCase_`` targets (the annotated tuple targets on the unpacking
# lines are themselves invalid syntax) and then read as ``UpperCAmelCase__``;
# ``data_handling`` and ``xgboost`` do not exist in this file (all three
# functions are named ``snake_case`` and shadow each other). Intended flow:
# fetch California housing, split 75/25, train XGBoost, print MAE/MSE.
# Restore distinct names before use.
def snake_case () -> None:
    """Train and evaluate the housing-price regressor (broken as written; see NOTE)."""
    UpperCamelCase_: Union[str, Any] = fetch_california_housing()
    UpperCamelCase_ ,UpperCamelCase_: Tuple = data_handling(UpperCAmelCase__ )
    UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = train_test_split(
        UpperCAmelCase__ , UpperCAmelCase__ , test_size=0.25 , random_state=1 )
    UpperCamelCase_: Union[str, Any] = xgboost(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(UpperCAmelCase__ , UpperCAmelCase__ )}''' )
    print(F'''Mean Square Error : {mean_squared_error(UpperCAmelCase__ , UpperCAmelCase__ )}''' )
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    # Fix: `main` did not exist; the driver defined last in this module is
    # bound to `snake_case`. Trailing file-corruption fragment removed.
    snake_case()
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCAmelCase :
    """Intentionally empty placeholder suite.

    The ``@require_onnxruntime`` decorator (from diffusers' testing utils)
    presumably gates collection on onnxruntime availability -- confirm against
    that helper's docs.

    Fix: the original body line carried a trailing "| 286 |" file-corruption
    fragment after ``pass``, which is a SyntaxError; the docstring now serves
    as the body.
    """
"""simple docstring"""
import qiskit
def UpperCAmelCase__ ( bita , bitb ):
    """Simulate a quantum half adder for two classical input bits.

    Builds a 4-qubit/2-clbit circuit: CNOTs write XOR(bita, bitb) onto qubit 2
    and a Toffoli writes AND(bita, bitb) onto qubit 3; both are measured over
    1000 shots on the Aer simulator.

    :return: the measurement histogram (counts dict) from the job result.

    Fix: the original signature repeated one mangled parameter name twice
    (a SyntaxError), tested the first bit for both inputs, and bound the
    backend/circuit/job to throwaway locals while reading ``qc_ha``/``job``.
    """
    backend = qiskit.Aer.get_backend('aer_simulator' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 ) # extract XOR value
    qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , backend , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    # Demonstrate the half adder on inputs (1, 1).
    # Fix: `half_adder`/`counts` did not exist under those names; the function
    # above is `UpperCAmelCase__`. Trailing file-corruption fragment removed.
    counts = UpperCAmelCase__(1, 1)
    print(F"Half Adder Output Qubit Counts: {counts}")
import numpy as np
from PIL import Image
def SCREAMING_SNAKE_CASE ( arr , size , stride ) -> np.ndarray:
    """Max-pool a square matrix.

    :param arr: square 2D array-like input.
    :param size: edge length of the pooling window.
    :param stride: step of the window in both directions.
    :return: pooled matrix of shape ``((n - size) // stride + 1,) * 2``.
    :raises ValueError: if ``arr`` is not square.

    Fix: the original signature repeated one mangled parameter name three
    times (a SyntaxError) and the body read undefined names (``arr``, ``i``,
    ``j``, ``updated_arr``) because every local was bound to a throwaway name.
    """
    mat = np.array(arr )
    if mat.shape[0] != mat.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    # compute the shape of the output matrix
    out_dim = (mat.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros
    pooled = np.zeros((out_dim, out_dim) )
    for out_i, i in enumerate(range(0 , mat.shape[0] - size + 1 , stride ) ):
        for out_j, j in enumerate(range(0 , mat.shape[1] - size + 1 , stride ) ):
            # maximum of the current pooling window
            pooled[out_i, out_j] = np.max(mat[i : i + size, j : j + size] )
    return pooled
def SCREAMING_SNAKE_CASE ( arr , size , stride ) -> np.ndarray:
    """Average-pool a square matrix (each window's mean truncated to int).

    :param arr: square 2D array-like input.
    :param size: edge length of the pooling window.
    :param stride: step of the window in both directions.
    :return: pooled matrix of shape ``((n - size) // stride + 1,) * 2``.
    :raises ValueError: if ``arr`` is not square.

    Fix: the original signature repeated one mangled parameter name three
    times (a SyntaxError) and the body read undefined names because every
    local was bound to a throwaway name.
    """
    mat = np.array(arr )
    if mat.shape[0] != mat.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    # compute the shape of the output matrix
    out_dim = (mat.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros
    pooled = np.zeros((out_dim, out_dim) )
    for out_i, i in enumerate(range(0 , mat.shape[0] - size + 1 , stride ) ):
        for out_j, j in enumerate(range(0 , mat.shape[1] - size + 1 , stride ) ):
            # truncated mean of the current pooling window
            pooled[out_i, out_j] = int(np.average(mat[i : i + size, j : j + size] ) )
    return pooled
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
lowerCamelCase : Optional[int] =Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 196 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase : List[Any] =logging.get_logger(__name__)
lowerCamelCase : Optional[Any] ={
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
# NOTE(review): mangled BiT ("Big Transfer") backbone configuration. The
# ``__init__`` signature repeats the parameter name ``SCREAMING_SNAKE_CASE``
# sixteen times (a SyntaxError); the body reads the intended names
# (``layer_type``, ``global_padding``, ``num_channels``, ...) which do not
# exist, and several assignments go to a throwaway ``UpperCamelCase__``
# local instead of ``self``. The class attributes are likewise all named
# ``_lowerCAmelCase``, so only the last ([ 'SAME', 'VALID' ]) would survive.
# Kept byte-identical; restore distinct names before use.
class __a ( A__ , A__ ):
    _lowerCAmelCase : Union[str, Any] = '''bit'''
    _lowerCAmelCase : List[str] = ['''preactivation''', '''bottleneck''']
    _lowerCAmelCase : Any = ['''SAME''', '''VALID''']
    def __init__( self : str , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=64 , SCREAMING_SNAKE_CASE : List[Any]=[2_56, 5_12, 10_24, 20_48] , SCREAMING_SNAKE_CASE : Union[str, Any]=[3, 4, 6, 3] , SCREAMING_SNAKE_CASE : str="preactivation" , SCREAMING_SNAKE_CASE : Any="relu" , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : List[Any]=32 , SCREAMING_SNAKE_CASE : Tuple=0.0 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , **SCREAMING_SNAKE_CASE : int , ):
        """Intended to validate layer type / padding and store all backbone hyperparameters (broken; see NOTE)."""
        super().__init__(**SCREAMING_SNAKE_CASE )
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                UpperCamelCase__ : Any = global_padding.upper()
            else:
                raise ValueError(F'Padding strategy {global_padding} not supported' )
        UpperCamelCase__ : Dict = num_channels
        UpperCamelCase__ : Dict = embedding_size
        UpperCamelCase__ : Tuple = hidden_sizes
        UpperCamelCase__ : Any = depths
        UpperCamelCase__ : Optional[int] = layer_type
        UpperCamelCase__ : int = hidden_act
        UpperCamelCase__ : str = global_padding
        UpperCamelCase__ : Any = num_groups
        UpperCamelCase__ : str = drop_path_rate
        UpperCamelCase__ : Optional[Any] = embedding_dynamic_padding
        UpperCamelCase__ : Tuple = output_stride
        UpperCamelCase__ : List[str] = width_factor
        UpperCamelCase__ : Any = ["stem"] + [F'stage{idx}' for idx in range(1 , len(SCREAMING_SNAKE_CASE ) + 1 )]
        UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = get_aligned_output_features_output_indices(
            out_features=SCREAMING_SNAKE_CASE , out_indices=SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] ):
    """Return the first n terms of the harmonic series as strings.

    ``__lowerCamelCase`` is the term count (an int or a numeric string); an
    empty string yields an empty list. The first term is "1", later terms
    "1/k".

    Fix: the original compared an undefined ``n_term`` and appended to an
    undefined ``series`` because the list was bound to a mangled local.
    """
    if __lowerCamelCase == "":
        return []
    series: list = []
    for temp in range(int(__lowerCamelCase ) ):
        series.append(f"""1/{temp + 1}""" if series else "1" )
    return series
if __name__ == "__main__":
    # Fix: the input was assigned to a mangled constant and the series builder
    # was called under the nonexistent name `harmonic_series`.
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
    print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
    print(UpperCamelCase(nth_term))
| 59 |
def snake_case_ ( number , shift_amount ) -> str:
    """Logical left shift: append ``shift_amount`` zero bits to ``number``'s
    binary representation (kept '0b'-prefixed).

    :raises ValueError: if either input is negative.

    Fix: the original signature repeated one mangled parameter name twice
    (a SyntaxError) and the body read undefined ``binary_number``.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def snake_case_ ( number , shift_amount ) -> str:
    """Logical right shift: drop ``shift_amount`` low bits from ``number``'s
    binary representation; returns '0b0' when everything is shifted out.

    :raises ValueError: if either input is negative.

    Fix: the original signature repeated one mangled parameter name twice
    (a SyntaxError) and the body read undefined ``binary_number``/
    ``shifted_binary_number``.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def snake_case_ ( number , shift_amount ) -> str:
    """Arithmetic right shift on a two's-complement binary string.

    Negative numbers are rendered in two's complement with a leading sign bit,
    which is replicated into the vacated positions.

    Fix: the original signature repeated one mangled parameter name twice
    (a SyntaxError) and the body read undefined ``binary_number``/
    ``binary_number_length`` because every local was bound to a mangled name.
    """
    if number >= 0: # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else: # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] ) # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        # Everything shifted out: the result is all sign bits.
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
# Script entry point: run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 196 | 0 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE_ ='''OwlViTImageProcessor'''
SCREAMING_SNAKE_CASE_ =('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    # NOTE(review): the two positional parameters share the name
    # ``snake_case__`` (a SyntaxError), and the body reads ``kwargs``,
    # ``image_processor``, ``tokenizer``, ``feature_extractor``, none of which
    # exist under these mangled names. Judging by the class attributes above
    # (OwlViTImageProcessor / CLIPTokenizer), this appears to be an OwlViT
    # processor constructor with a `feature_extractor` deprecation shim --
    # confirm against upstream before use.
    def __init__( self : List[Any] , snake_case__ : str=None , snake_case__ : Tuple=None , **snake_case__ : Any ):
        """Build the processor from an image processor and a tokenizer (broken as written; see NOTE)."""
        UpperCAmelCase__ : Optional[int] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , snake_case__ , )
            UpperCAmelCase__ : List[str] = kwargs.pop("feature_extractor" )
        UpperCAmelCase__ : List[str] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(snake_case__ , snake_case__ )
    # NOTE(review): mangled. All six parameters share the name ``snake_case__``
    # (a SyntaxError); the body reads ``text``, ``query_images``, ``images``,
    # ``return_tensors``, ``encodings``, ``max_num_queries``, ``input_ids``,
    # ``attention_mask``, ``encoding``, ``image_features``, which do not exist
    # under these names. Kept byte-identical; the intended flow (tokenize
    # padded text queries, stack per-backend, process query images and/or
    # images, merge into one encoding) is visible in the structure below.
    def __call__( self : Optional[Any] , snake_case__ : int=None , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None , snake_case__ : str="max_length" , snake_case__ : List[str]="np" , **snake_case__ : Optional[int] ):
        """Prepare text and/or (query) images for the model (broken as written; see NOTE)."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(snake_case__ , snake_case__ ) or (isinstance(snake_case__ , snake_case__ ) and not isinstance(text[0] , snake_case__ )):
                UpperCAmelCase__ : Any = [self.tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ , **snake_case__ )]
            elif isinstance(snake_case__ , snake_case__ ) and isinstance(text[0] , snake_case__ ):
                UpperCAmelCase__ : List[str] = []
                # Maximum number of queries across batch
                UpperCAmelCase__ : List[Any] = max([len(snake_case__ ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(snake_case__ ) != max_num_queries:
                        UpperCAmelCase__ : Tuple = t + [" "] * (max_num_queries - len(snake_case__ ))
                    UpperCAmelCase__ : List[Any] = self.tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
                    encodings.append(snake_case__ )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            if return_tensors == "np":
                UpperCAmelCase__ : str = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase__ : Optional[int] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                UpperCAmelCase__ : int = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase__ : List[str] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                UpperCAmelCase__ : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                UpperCAmelCase__ : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                UpperCAmelCase__ : Tuple = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase__ : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            UpperCAmelCase__ : Tuple = BatchEncoding()
            UpperCAmelCase__ : Union[str, Any] = input_ids
            UpperCAmelCase__ : Optional[Any] = attention_mask
        if query_images is not None:
            UpperCAmelCase__ : Optional[Any] = BatchEncoding()
            UpperCAmelCase__ : List[str] = self.image_processor(
                snake_case__ , return_tensors=snake_case__ , **snake_case__ ).pixel_values
            UpperCAmelCase__ : Dict = query_pixel_values
        if images is not None:
            UpperCAmelCase__ : Tuple = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
        if text is not None and images is not None:
            UpperCAmelCase__ : int = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            UpperCAmelCase__ : List[Any] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def __a ( self : Tuple , *snake_case__ : Tuple , **snake_case__ : Tuple ):
'''simple docstring'''
return self.image_processor.post_process(*snake_case__ , **snake_case__ )
def __a ( self : Any , *snake_case__ : int , **snake_case__ : Optional[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*snake_case__ , **snake_case__ )
def __a ( self : int , *snake_case__ : List[str] , **snake_case__ : Optional[int] ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*snake_case__ , **snake_case__ )
def __a ( self : Dict , *snake_case__ : str , **snake_case__ : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def __a ( self : str , *snake_case__ : str , **snake_case__ : Dict ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def __a ( self : Any ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case__ , )
return self.image_processor_class
@property
def __a ( self : List[str] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case__ , )
return self.image_processor
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """Builds a tiny DecisionTransformer config plus random trajectory inputs.

    Reconstructed from generated code: every parameter was named `snake_case__`
    (duplicate argument names are a SyntaxError) and every method was `__a`,
    while the test class below already calls this helper as
    `DecisionTransformerModelTester` with the upstream method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Random tensors shaped like one batch of trajectories, plus a config."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_0_0_0)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for DecisionTransformerModel.

    Reconstructed from generated code: the mixin bases were the undefined name
    `__magic_name__`, every flag was assigned to the same
    `SCREAMING_SNAKE_CASE_` attribute (each assignment overwrote the previous),
    and all methods were named `__a`, so unittest discovered nothing.
    """

    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        # `config_class` was the undefined placeholder `snake_case__`; it must
        # be the model's config class.
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest ( unittest.TestCase ):
    """Slow integration test: two steps of autoregressive action prediction.

    Reconstructed from generated code: `snake_case__` stood for `torch_device`,
    `torch.floataa` is not a real dtype (should be `torch.float32`), and the
    predicted action was assigned to a dead local instead of `actions[-1]`.
    """

    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 1_0  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            # Grow action/reward buffers by one (still unknown) step.
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was assigned to `A__` but consumed as `git_repo_path`, which
# raised a NameError at import time; bind the name that is actually used.
# (The `List[str]` annotation was dropped: `List` was never imported and
# module-level annotations are evaluated at import time.)
git_repo_path = abspath(join(dirname(__file__), 'src'))
A__ = git_repo_path  # keep the legacy alias from the generated code
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def a ( lowerCamelCase_ ):
    """`pytest_configure` hook body: register the custom markers used by the suite.

    Fix: the generated body referenced `config` while the parameter was named
    `lowerCamelCase_`, raising a NameError; alias it to keep the signature.
    """
    config = lowerCamelCase_
    config.addinivalue_line(
        '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
    config.addinivalue_line(
        '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
    config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
    config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
    config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
    config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def a ( lowerCamelCase_ ):
    """`pytest_addoption` hook body: delegate option registration to the
    shared transformers testing helper."""
    from transformers.testing_utils import pytest_addoption_shared as _shared_addoption

    _shared_addoption(lowerCamelCase_)
def a ( lowerCamelCase_ ):
    """`pytest_terminal_summary` hook body: emit the extra reports when
    `--make-reports` was passed.

    Fixes two NameErrors in the generated code: the body referenced
    `terminalreporter` while the parameter was `lowerCamelCase_`, and passed an
    undefined name as `id=` (the id is the `--make-reports` option value).
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    terminalreporter = lowerCamelCase_
    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports)
def a ( session , exitstatus ):
    """`pytest_sessionfinish` hook body.

    Fix: the generated signature named both parameters `lowerCamelCase_`
    (duplicate argument names are a SyntaxError); restored
    `(session, exitstatus)` and the write to `session.exitstatus` — the
    generated body only set a dead local.
    """
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# Fix: the generated code bound both values to the throwaway name `A__`,
# leaving `IGNORE_RESULT` and `OutputChecker` (used by the class below)
# undefined; the bogus `str`/`int` annotations were also dropped.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
A__ = IGNORE_RESULT  # legacy alias from the generated code
OutputChecker = doctest.OutputChecker
A__ = OutputChecker
class _UpperCAmelCase ( A__ ):
    """Doctest output checker that accepts any output when the IGNORE_RESULT
    option flag is set (at this point `A__` holds `doctest.OutputChecker`)."""

    def lowercase__ ( self , want , got , optionflags ):
        # Fix: the generated signature named all three parameters
        # `lowerCamelCase` (duplicate argument names are a SyntaxError);
        # restored doctest.OutputChecker.check_output's parameter names.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
# Fix: `CustomOutputChecker` was never defined under that name (NameError) —
# the checker class above is `_UpperCAmelCase`. The `Dict`/`List[str]`/`Any`
# annotations were dropped: `typing` is not imported and module-level
# annotations are evaluated at import time.
A__ = _UpperCAmelCase
A__ = HfDoctestModule
A__ = HfDocTestParser
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Lazy-import structure: maps submodule name -> list of public names.
# Fix: the generated code assigned everything to a throwaway `A__` name and
# never defined `_import_structure`, which `_LazyModule` below requires
# (NameError at import time); it also imported `GPTSwaTokenizer` from
# `tokenization_gpt_swa`, inconsistent with the declared `GPTSw3Tokenizer`.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase :List[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase :int = 2_5_0_0_0_4
lowerCAmelCase :Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
    """MBart tokenizer unit tests: full-tokenizer token/id round-trips and
    slow-vs-fast save/load parity.

    NOTE(review): `_A` is an unresolved code-generation placeholder (never
    defined in this file), and `lowercase__` plus the `Optional`/`Union`/...
    annotation names are likewise unbound here — every method will raise a
    NameError at runtime until the intended values (e.g. the sentencepiece
    fixture path, True/False flags) are restored from the upstream test file.
    All method names collapsed to `__lowerCAmelCase`, so only the last
    definition survives on the class.
    """
    A_ : Optional[Any] = MBartTokenizer
    A_ : List[str] = MBartTokenizerFast
    A_ : List[Any] = True
    A_ : int = True
    def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
        # setUp: build a tokenizer from the sentencepiece fixture and save it
        # to the temp dir used by the common tokenizer tests.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __magic_name__ : List[Any] = MBartTokenizer(_A , keep_accents=_A )
        tokenizer.save_pretrained(self.tmpdirname )
    def __lowerCAmelCase ( self : Tuple ) -> Tuple:
        # Full-tokenizer check: tokenization, token->id and id->token mappings
        # (ids are offset by `fairseq_offset`; out-of-vocab pieces map to <unk>).
        __magic_name__ : Union[str, Any] = MBartTokenizer(_A , keep_accents=_A )
        __magic_name__ : Union[str, Any] = tokenizer.tokenize('This is a test' )
        self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        __magic_name__ : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            _A , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        __magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(_A )
        self.assertListEqual(
            _A , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #            ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        __magic_name__ : int = tokenizer.convert_ids_to_tokens(_A )
        self.assertListEqual(
            _A , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )
    def __lowerCAmelCase ( self : Tuple ) -> str:
        # Save/load parity: the fast tokenizer must round-trip through
        # `save_pretrained` in all three formats (default, legacy, non-legacy)
        # and agree with the slow tokenizer's saved files.
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        __magic_name__ : Any = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __magic_name__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                __magic_name__ : Any = self.tokenizer_class.from_pretrained(_A , **_A )
                __magic_name__ : int = tempfile.mkdtemp()
                __magic_name__ : List[str] = tokenizer_r.save_pretrained(_A )
                __magic_name__ : str = tokenizer_p.save_pretrained(_A )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                __magic_name__ : Dict = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(_A , _A )
                # Checks everything loads correctly in the same way
                __magic_name__ : Any = tokenizer_r.from_pretrained(_A )
                __magic_name__ : Union[str, Any] = tokenizer_p.from_pretrained(_A )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(_A )
                # Save tokenizer rust, legacy_format=True
                __magic_name__ : List[Any] = tempfile.mkdtemp()
                __magic_name__ : Tuple = tokenizer_r.save_pretrained(_A , legacy_format=_A )
                __magic_name__ : Optional[int] = tokenizer_p.save_pretrained(_A )
                # Checks it save with the same files
                self.assertSequenceEqual(_A , _A )
                # Checks everything loads correctly in the same way
                __magic_name__ : Optional[int] = tokenizer_r.from_pretrained(_A )
                __magic_name__ : int = tokenizer_p.from_pretrained(_A )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )
                shutil.rmtree(_A )
                # Save tokenizer rust, legacy_format=False
                __magic_name__ : int = tempfile.mkdtemp()
                __magic_name__ : List[str] = tokenizer_r.save_pretrained(_A , legacy_format=_A )
                __magic_name__ : int = tokenizer_p.save_pretrained(_A )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                __magic_name__ : Optional[Any] = tokenizer_r.from_pretrained(_A )
                __magic_name__ : Optional[int] = tokenizer_p.from_pretrained(_A )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )
                shutil.rmtree(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
    """English->Romanian MBart integration tests against the released checkpoint.

    Reconstructed from generated code: `_A` placeholders resolved (flags,
    skip_special_tokens, etc.), class attributes renamed to the names the
    method bodies actually read (`checkpoint_name`, `src_text`, ...), the
    collapsed `__lowerCAmelCase` method names restored so unittest discovers
    them, undefined typing annotations dropped, and the stray `| 275 |`
    extraction residue removed from the final line.
    """

    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_enro_tokenizer_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seqaseq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : str ):
    """Return *lowerCAmelCase* with every ASCII letter a-z upper-cased.

    Characters outside the ASCII lowercase range are passed through unchanged.
    """
    converted = []
    for ch in lowerCAmelCase:
        converted.append(ch.upper() if 'a' <= ch <= 'z' else ch)
    return ''.join(converted)
if __name__ == "__main__":
    # Run the doctests in this module when executed as a script.
    # Fix: removed the `| 275 | 1 |` extraction residue fused onto the
    # `testmod()` call, which made the line a SyntaxError.
    from doctest import testmod

    testmod()
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """Config tester that additionally checks the V2-specific `width_multiplier`.

    Reconstructed from generated code: the base class was the undefined name
    `_lowercase` (should be the imported `ConfigTester`), the class was named
    `__snake_case` although the test class below instantiates
    `MobileViTVaConfigTester`, and the hook method must be named
    `create_and_test_config_common_properties` for `run_common_tests` to call it.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''width_multiplier'''))
class MobileViTVaModelTester:
    """Builds tiny MobileViTV2 configs and random image inputs for the tests.

    Reconstructed from generated code: every parameter of every method was
    named `__lowerCAmelCase` (duplicate argument names are a SyntaxError),
    methods were obfuscated while the test class below calls the upstream
    names, and `get_config` read `self.ffn_dropout_prob`/`self.attn_dropout_prob`
    although `__init__` only sets `ffn_dropout`/`attn_dropout`.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=6_4,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=3_2,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=1_0,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # MobileViTV2's final hidden size scales with the width multiplier.
        self.last_hidden_size = make_divisible(5_1_2 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        """Random pixel values plus (optional) classification/segmentation labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case(_lowercase, _lowercase, unittest.TestCase):
    """Common-suite tests for MobileViTV2 models (no attentions, no head masking, no pruning).

    Fixes: every method was named ``SCREAMING_SNAKE_CASE`` so later defs silently shadowed
    earlier ones and unittest discovered nothing; mixin attribute names and skip targets are
    restored. Undefined ``__lowerCAmelCase`` references are replaced with the intended values.
    """

    # NOTE(review): two identical bases raise "duplicate base class" at class creation;
    # upstream these are ModelTesterMixin and PipelineTesterMixin — confirm the file's aliases.

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)  # NOTE(review): device obscured upstream; torch_device is the HF test convention
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the slow integration tests.

    Fix: renamed from the obfuscated ``snake_case_`` to ``prepare_img``, the name the
    integration tests below actually call.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __snake_case(unittest.TestCase):
    """Slow integration tests running real MobileViTV2 checkpoints on a fixture image.

    Fixes: all methods shared one obfuscated name (only the last survived); undefined
    ``__lowerCAmelCase`` references are replaced with the intended targets, and the lost
    ``outputs.logits = ...`` assignment in the post-processing test is restored.
    """

    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        # NOTE(review): device obscured upstream; torch_device is the HF test convention — confirm import
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        # Post-processing with an explicit target size must resize the segmentation map.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # Without target sizes the map keeps the model's output resolution.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# tf -> hf key-renaming patterns. All five constants below were bound to one name
# (`_UpperCAmelCase`) while the conversion code reads INIT_COMMON / END_COMMON /
# DECODER_PATTERNS / REMAINING_PATTERNS / KEYS_TO_IGNORE — restoring the names they
# are read by fixes the NameError at import time.
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

# Patterns for keys under pegasus/decoder (self- and cross-attention projections).
DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# Patterns for everything else (embeddings and encoder weights).
REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# TF bias tensors that have no counterpart in the HF model.
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply every ``(tf_name, hf_name)`` substring replacement in ``patterns`` to key ``k``.

    Fixes: the original declared two parameters with the same name (SyntaxError) and
    discarded each replacement result, returning ``k`` unchanged. Renamed to the name
    used at the call sites in ``convert_bigbird_pegasus``.
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map a TF BigBirdPegasus checkpoint ({name: numpy array}) onto an HF model and return it.

    Fixes: duplicate parameter names (SyntaxError); mangled locals (``state_dict``,
    ``mapping``, ``decoder_weights``, ... were read but never bound); ``k.endswith(ending)``
    was mangled so every key matched the same test; the converted tensors were never stored
    into ``mapping``. Renamed to the name the wrapper below calls.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T  # TF stores linear layers transposed relative to torch
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        # The shared position embeddings are redirected below, so they may be absent here.
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # Position embeddings are shared between encoder and decoder.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    """Read every variable from a TF checkpoint at ``path`` into a ``{name: numpy array}`` dict.

    Fixes: mangled locals (``tf_weights`` was returned but never bound) and the
    ``tf.train.load_variable`` call that passed the path twice instead of (path, name).
    Renamed to the name the conversion wrapper calls.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]  # optimizer bookkeeping, not a model weight
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    """End-to-end conversion: read the TF checkpoint, convert it, and save the HF model to ``save_dir``.

    Fixes: duplicate parameter names (SyntaxError) and mangled locals; renamed to the
    name the ``__main__`` block calls.
    """
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    # Fixes: parser / args / config_update were all bound to `_UpperCAmelCase` while the
    # following lines read the real names, raising NameError on launch.
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}  # no config overrides by default
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 140 | 0 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__(TokenizerTesterMixin, unittest.TestCase):
    """WordPiece tokenizer tests for LayoutLM (mirrors the BERT tokenizer suite).

    Fixes: the base class alias ``A_`` was undefined (TokenizerTesterMixin is what the
    import above provides); class attribute names the mixin reads are restored; every
    method shared one name so only the last survived; ``_lowerCamelCase`` references in
    method bodies were undefined.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocab written to a temp dir for the tokenizer under test.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 368 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCAmelCase_ : str = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save an HF model to ``dirpath``; if the directory already holds a saved model, clear it first.

    Fixes: both parameters were named ``__A`` (SyntaxError); renamed to the name
    ``prune_heads`` calls (``save_model``).
    """
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dim.

    ``unlogit=True`` squares ``p`` first. ``0 * log(0)`` entries are forced to 0 so
    zero-probability bins do not produce NaN.

    Fixes: ``plogp`` was read but never bound, and the masked assignment
    ``plogp[p == 0] = 0`` had been reduced to a plain ``= 0``.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # avoid NaN from 0 * log(0)
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor row by row (floats with 5 decimals, long tensors as integers).

    Fixes: the body read ``tensor`` while the parameter was named ``__A``. Renamed to
    the name the other functions in this script call (``print_ad_tensor``).
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and gradient-based head importance scores.

    Returns ``(attn_entropy, head_importance, total_loss)``. Importance follows
    "Are Sixteen Heads Really Better than One?" (the head-mask gradient magnitude).

    Fixes: the original signature declared seven parameters all named ``__A``
    (SyntaxError); locals such as ``head_importance``/``tot_tokens`` were read but never
    bound; the indexed assignment building ``head_ranks`` had been dropped.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        ((input_ids,),) = (inputs,)

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least-important heads until the LM score drops below threshold.

    Returns the final head mask (also saved to ``head_mask.npy`` in ``args.output_dir``).

    Fixes: duplicate ``__A`` parameters (SyntaxError); the indexed assignments
    ``head_importance[head_mask == 0.0] = inf`` and ``new_head_mask[...] = 0.0`` had been
    reduced to plain assignments, so nothing was ever masked.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically prune the heads zeroed in ``head_mask`` and compare score/params/time before vs after.

    Fixes: four parameters all named ``__A`` (SyntaxError); ``isinstance(v, int)`` and the
    dict write-back ``heads_to_prune[k] = [v]`` had been mangled into no-ops.
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [v]  # squeeze() collapses a single head to a scalar

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """CLI entry point: load a GPT-2 LM, compute head entropy/importance, optionally mask and prune heads.

    Fixes: ``parser``/``args`` were bound to mangled names while the following lines read
    the real ones (NameError); every ``type=__A`` is restored to its concrete type;
    ``args.device``/``args.n_gpu`` were never set; ``np.intaa`` does not exist (``np.int64``).
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 120 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class a_(ProcessorMixin):
    r"""
    Processor wrapping a BLIP image processor, an LLM tokenizer and a Q-Former tokenizer
    into a single InstructBLIP processor.

    Fixes: the base alias ``lowerCamelCase`` was undefined (ProcessorMixin is what the
    import above provides); the three class attributes ProcessorMixin reads were all
    bound to one name ``lowercase``; ``__init__``/``__call__`` declared every parameter
    with the same name (SyntaxError); the Q-Former encodings were popped but never stored.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize ``text`` with both tokenizers and preprocess ``images``; merge everything
        into one BatchFeature (Q-Former tensors are prefixed with ``qformer_``)."""
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the LLM tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the LLM tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save all components; the Q-Former tokenizer goes into a ``qformer_tokenizer`` subfolder."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the standard components, plus the Q-Former tokenizer from its subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 321 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: maps submodule name -> public names it defines.
# Fix: the modeling list previously *overwrote* this dict (same variable name), and the
# final _LazyModule call referenced an undefined `_import_structure` and never installed
# the lazy module into sys.modules.
_import_structure = {
    'configuration_xlm_roberta_xl': [
        'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XLMRobertaXLConfig',
        'XLMRobertaXLOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: skip registering the PyTorch modeling objects.
    pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
        'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMRobertaXLForCausalLM',
        'XLMRobertaXLForMaskedLM',
        'XLMRobertaXLForMultipleChoice',
        'XLMRobertaXLForQuestionAnswering',
        'XLMRobertaXLForSequenceClassification',
        'XLMRobertaXLForTokenClassification',
        'XLMRobertaXLModel',
        'XLMRobertaXLPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 321 | 1 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=3_2,
    max_steps=1_0,
    size_objective_set=1_0_0,
    min_len=1_0_2_6,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """
    Collect (context, IG(X)) training pairs for the IGF secondary learner and save them to
    `igf_data_file`.

    Fix: the original had seven parameters all sharing one name (a SyntaxError) and bound every
    intermediate to a single throwaway variable, so `model`, `train_data` and `objective_set`
    were undefined at the `del` below.
    """
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1_0_2_6, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=1_5,
    secondary_learner_batch_size=1_2_8,
    eval_freq=1_0_0,
    igf_model_path="igf_model.pt",
):
    """
    Build and train the IGF secondary learner on pre-collected (context, IG(X)) pairs and
    return the trained learner.

    Fix: duplicate parameter names (SyntaxError) and collapsed locals, which made the
    `del model, secondary_learner_train_data` cleanup reference undefined names.
    """
    set_seed(4_2)

    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=1_0_0,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=3_2,
    max_steps=1_0_0_0,
    batch_size=1_6,
    threshold=1.0,
    recopy_model=recopy_gpta,
    secondary_learner=None,
    eval_interval=1_0,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """
    Fine-tune `model` on `train_dataset`, optionally filtering each sampled context through the
    IGF `secondary_learner` (only contexts with predicted IG(X) >= threshold contribute to the
    batch), evaluate perplexity on `test_dataset` every `eval_interval` optimizer steps, save
    the final weights to `finetuned_model_name`, and return the fine-tuned model.

    Fix: duplicate parameter names (SyntaxError) and every local collapsed to one variable, so
    `lm_optimizer`, `lm_scheduler`, `test_perps`, `contexts`, `examples`, etc. were undefined.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()

    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)

    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 1_0:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    current_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(current_perp)
                    print("Test perplexity, step", global_step, ":", current_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 6_0:
                break
        if max_steps > 0 and global_step > 6_0:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    """
    End-to-end IGF pipeline: collect (context, IG(X)) pairs, train the secondary learner,
    then fine-tune GPT-2 with the learner as a batch filter.

    Fix: the original function shared its name with three others (so the calls below were
    undefined) and referenced an undefined placeholder where the loaded IGF data, the model
    and the datasets should have been.
    """
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=3_2, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=1_0_0, type=int, help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=1_0_0, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1_0_0_0, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=1_2_8, type=int, help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=1_6, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=1_0, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=1_0_0, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len", default=1_0_2_6, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs", default=1_5, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpta, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=3_2, max_steps=1_0, size_objective_set=1_0_0, min_len=1_0_2_6, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=1_5, secondary_learner_batch_size=1_2_8, eval_freq=1_0_0, igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(4_2)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=3_2, file="data/tokenized_stories_train_wikitext103.jbl", number=1_0_0, min_len=1_0_2_6, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=3_2, max_steps=1_0_0_0, batch_size=1_6, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=secondary_learner, eval_interval=1_0, finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
| 360 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    """Tests for the text-to-speech tool (SpeechT5-backed).

    Fix: the setup method never assigned `self.tool` (so `self.tool.setup()` raised
    AttributeError), and all three methods shared one name so two were shadowed.
    """

    def setUp(self):
        # Load the tool once per test; setup() performs lazy model initialization.
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]),
            )
        )
| 324 | 0 |
'''simple docstring'''
def is_palindrome(head):
    """
    Check whether a singly linked list (nodes with `.val` and `.next`) reads the same
    forwards and backwards, by splitting at the midpoint and reversing the second half.
    Runs in O(n) time and O(1) extra space; destroys the original list structure.

    Fix: the original collapsed every local to one name, so `fast`, `slow`, `second`,
    `node` and `nxt` were all undefined (NameError). Renamed from the shared
    `_UpperCamelCase` that shadowed the two sibling implementations.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    """
    Check whether a singly linked list is a palindrome by pushing the second half onto a
    stack and comparing against a cursor from the head. O(n) time, O(n) extra space;
    does not modify the list.

    Fix: collapsed locals made `fast`, `slow`, `cur` and `stack` undefined; renamed from
    the shared `_UpperCamelCase`.
    """
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    fast = slow = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head):
    """
    Check whether a singly linked list is a palindrome by recording the positions of each
    value and verifying that position pairs mirror around the middle. At most one value
    may occur an odd number of times (the middle element). O(n) time and space.

    Fix: collapsed locals made `d`, `pos`, `checksum`, `middle` and `step` undefined, and
    the position list appended the head node instead of `pos`; renamed from the shared
    `_UpperCamelCase`.
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                # mirrored positions must sum to last index
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 346 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """
    Return the sum of Euler's totient phi(k) for 2 <= k <= `limit`, computed with a
    linear sieve: phi is initialized to k - 1 and corrected for every prime divisor.
    (Project Euler style: this counts reduced proper fractions with denominator <= limit.)

    Fix: renamed from `_UpperCamelCase` so the `print(solution())` entry point below
    actually resolves.
    """
    # phi[k] starts at k - 1; for each prime p, phi[j] -= phi[j] // p for multiples j.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime (untouched by the sieve so far)
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
| 346 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure the transformers logging facility for this example script:
# INFO verbosity, default handler attached, and explicit (timestamped) formatting.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file,
    eval_file,
    test_file,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """
    Load CSV data files into `datasets`, tokenize (one or two text columns), and convert
    each split into a `tf.data.Dataset` of (features dict, label id) pairs.

    Returns:
        (train_ds, val_ds, test_ds, label2id) — datasets are None for splits not provided.

    Fix: the original declared six parameters all named `A__` (a SyntaxError) and bound
    every intermediate to one variable, clobbering the files dict and leaving `ds`,
    `features_name`, `input_names`, `transformed_ds`, etc. undefined.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('''csv''', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # The label column is removed from the feature columns.
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    labelaid = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        # Single-sentence task.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='''max_length'''
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # Sentence-pair task.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='''max_length''',
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.intaa for k in input_names}, tf.intaa),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.intaa for k in input_names}, tf.intaa),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.intaa for k in input_names}, tf.intaa),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments that can be
    specified on the command line.

    Fix: all six fields previously shared one name (so only the last survived), the class
    name did not match `main`'s `DataTrainingArguments` reference, and defaults pointed at
    an undefined identifier.
    """

    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=1_2_8,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Fix: fields previously shared one name, the class name did not match `main`'s
    `ModelArguments` reference, and defaults pointed at an undefined identifier.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
def main():
    """
    Entry point for TF text-classification fine-tuning: parse dataclass args, build the
    tf.data datasets, load model/tokenizer, train and/or evaluate with `TFTrainer`, and
    return the evaluation results dict.

    Fix: the function was named `_A` while the `__main__` guard calls `main()`, every
    local was collapsed to one name (leaving `trainer`, `config`, `results`, etc.
    undefined), and `logger` was never bound at module level.
    """
    # Bind a local logger so this function is self-contained even though the module-level
    # logger variable was mangled.
    logger = logging.getLogger(__name__)

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.'''
        )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO,
    )
    logger.info(
        F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        F"16-bits training: {training_args.fpaa}"
    )
    logger.info(F"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        labelaid=label2id,
        idalabel={id: label for label, id in label2id.items()},
        finetuning_task='''text-classification''',
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool('''.bin''' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        with open(output_eval_file, '''w''') as writer:
            logger.info('''***** Eval results *****''')
            for key, value in result.items():
                logger.info(F" {key} = {value}")
                writer.write(F"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
| 52 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public names it defines.
# Fix: the optional-backend lists previously *overwrote* this dict (same variable name),
# the final _LazyModule call referenced an undefined `_import_structure`, and the lazy
# module was never installed into sys.modules.
_import_structure = {
    '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
    '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
    '''processing_whisper''': ['''WhisperProcessor'''],
    '''tokenization_whisper''': ['''WhisperTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_whisper_fast'''] = ['''WhisperTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_whisper'''] = [
        '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''WhisperForConditionalGeneration''',
        '''WhisperModel''',
        '''WhisperPreTrainedModel''',
        '''WhisperForAudioClassification''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_whisper'''] = [
        '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFWhisperForConditionalGeneration''',
        '''TFWhisperModel''',
        '''TFWhisperPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_whisper'''] = [
        '''FlaxWhisperForConditionalGeneration''',
        '''FlaxWhisperModel''',
        '''FlaxWhisperPreTrainedModel''',
        '''FlaxWhisperForAudioClassification''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCAmelCase : int = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    """Deprecated alias of `DeformableDetrImageProcessor` that warns on instantiation.

    Fix: the base class was an undefined placeholder, `*_A, **_A` duplicated a parameter
    name (SyntaxError), and the warning category argument was the undefined `_A`.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 280 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public names it defines.
# Fix: the modeling list previously *overwrote* this dict (same variable name), the final
# _LazyModule call referenced an undefined `_import_structure`, and the lazy module was
# never installed into sys.modules.
_import_structure = {
    '''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: skip registering the PyTorch modeling objects.
    pass
else:
    _import_structure['''modeling_falcon'''] = [
        '''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FalconForCausalLM''',
        '''FalconModel''',
        '''FalconPreTrainedModel''',
        '''FalconForSequenceClassification''',
        '''FalconForTokenClassification''',
        '''FalconForQuestionAnswering''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so torch loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    """Tests for transformers.activations.

    Fix: the four methods previously shared one name (so three were shadowed), the
    `assertRaises` calls referenced an undefined identifier instead of exception types,
    and `act1.a = 1` was lost to a throwaway local assignment.
    """

    def test_gelu_versions(self):
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00])
        torch_builtin = get_activation('gelu')
        # gelu_python matches torch's builtin gelu, but differs from the tanh approximation.
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00])
        torch_builtin = get_activation('gelu')
        geluaa = get_activation('gelu_10')

        y_gelu = torch_builtin(x)
        y_gelu_aa = geluaa(x)

        clipped_mask = torch.where(y_gelu_aa < 10.0, 1, 0)

        # gelu_10 clips at 10 and agrees with gelu below the clip point.
        self.assertTrue(torch.max(y_gelu_aa).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_aa * clipped_mask))

    def test_get_activation(self):
        # All registered activation names resolve; unknown names raise KeyError.
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        # Each get_activation call must return a fresh object, not a shared singleton.
        acta = get_activation('gelu')
        acta.a = 1
        actb = get_activation('gelu')
        self.assertEqual(acta.a, 1)
        with self.assertRaises(AttributeError):
            _ = actb.a
| 368 | """simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds a tiny `BlipTextConfig` plus dummy inputs and runs shape checks on `TFBlipTextModel`.

    The original class had every `__init__` parameter named `lowercase` (duplicate argument
    names are a SyntaxError) and did not match the `BlipTextModelTester(self)` call site below.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Create random input ids and an attention mask that is 1 up to a random start index."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """Build the small `BlipTextConfig` described by this tester's attributes."""
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Run the model with and without the mask and check output shapes."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the (config, inputs_dict) shape the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """TF Blip text-model tests: common suite via `TFModelTesterMixin` plus Blip-specific skips.

    The original base class was the undefined name `__lowerCAmelCase` and every method was
    named `lowerCamelCase_`, so the methods shadowed each other and unittest discovered none.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        # hidden_size must match the tester's intermediate_size=37 config probing convention.
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        # Blip checkpoints legitimately miss some keys across frameworks.
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 203 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule -> public-symbol map consumed by `_LazyModule`; optional-backend entries are
# only registered when their dependency is importable. (The original assigned each piece
# to throwaway `A__` names and then referenced the undefined `_import_structure`.)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime everything is lazy.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attributes import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 103 |
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator using the linear congruential method.

    Each call to `next_number` advances the state: seed <- (a*seed + c) mod m.
    (The original class/method names did not match the usage in the demo below.)
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        # NOTE: the default seed is evaluated once at import time (intentional here, B008 suppressed).
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator and return the next value in [0, modulo)."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
    while True:
        print(lcg.next_number())
| 103 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCAmelCase : Optional[int] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__UpperCAmelCase : Optional[int] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a dict mapping each utf-8 byte value (0-255) to a printable unicode character.

    Printable bytes map to themselves; the rest are shifted above 255 so the reversible
    byte-level BPE never has to deal with whitespace/control characters.
    (The original was named `a` — while the tokenizer calls `bytes_to_unicode()` — and its
    `cs`/`n` locals were never actually bound.)
    """
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbol strings).

    (The original was named `a` and never actually bound `pairs`/`prev_char`.)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_(PreTrainedTokenizer):
    """BART tokenizer: byte-level Byte-Pair-Encoding (same algorithm as the GPT-2 tokenizer).

    Text is mapped byte-by-byte to printable unicode via `bytes_to_unicode`, so ordinary
    text never produces `<unk>`. The original had every class attribute named
    `__UpperCamelCase` and every method named `_lowercase`, so the methods shadowed each
    other and the `PreTrainedTokenizer` hook methods (`_tokenize`, `_convert_token_to_id`,
    …) did not exist; the base class was the undefined name `_a`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load the json vocab and BPE merge ranks; wrap special tokens as `AddedToken`s."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id map, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to one pre-tokenized token, memoized in `self.cache`."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back into its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and reverse the byte-to-unicode mapping back into text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into `save_directory`; return both paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap sequence(s) as `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BART does not use token type ids, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is tokenized like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 315 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : Union[str, Any] = logging.getLogger()
def a():
    """Return the value of the `-f` command-line flag (pytest/ipykernel compatibility shim)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
class UpperCAmelCase_(TestCasePlus):
    """Slow end-to-end test for the DeeBERT research example on the MRPC fixture data.

    The original base was the undefined name `_a`, all three methods were named
    `_lowercase` (shadowing each other), and `patch.object(__SCREAMING_SNAKE_CASE, ...)`
    referenced an undefined name instead of `sys`.
    """

    def setup(self) -> None:
        # Mirror the example's logger to stdout so failures are debuggable from pytest output.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run `run_glue_deebert.main()` with `args`; require every reported metric >= 0.666."""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, '''run_glue_deebert.py''' )
            with patch.object(sys, '''argv''', args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666 )

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        """Two-stage train + two eval passes (per-highway and early-exit)."""
        train_args = '''
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            '''.split()
        self.run_and_check(train_args)

        eval_args = '''
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(entropy_eval_args)
| 315 | 1 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE(FeatureExtractionMixin):
    """Extract text nodes and their XPaths from raw HTML strings (requires BeautifulSoup/bs4).

    The original base was the undefined name `A__`, the three helper methods all shared the
    name `__lowerCAmelCase` while the call sites used `xpath_soup` / `get_three_from_single`
    / `construct_xpath`, and most locals were never actually bound.
    """

    def __init__(self, **kwargs) -> None:
        requires_backends(self, ["""bs4"""] )
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk up from `element`, collecting tag names and 1-based sibling indices for an XPath."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            # Subscript 0 means "only child of this tag name" (rendered without brackets).
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Return (doc strings, xpath tag sequences, xpath subscript sequences) for one HTML string."""
        html_code = BeautifulSoup(html_string, """html.parser""" )
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("""Number of doc strings and xtags does not correspond""" )
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("""Number of doc strings and xsubs does not correspond""" )
        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Render tag/subscript sequences as an XPath string like /html/body/div[2]."""
        xpath = """"""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Extract nodes + xpaths from one HTML string or a batch; returns a `BatchFeature`."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must of type `str`, `List[str]` (batch of examples), """
                f"""but is of type {type(html_strings)}.""" )
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"""nodes""": nodes, """xpaths""": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 84 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization.

    (The original was named `A` while the rest of the file calls `is_prime`.)
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return `n` plus every left-truncation and right-truncation of its decimal digits.

    (The original was named `A` while callers use `list_truncated_nums`.)
    """
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # drop i leading digits
        list_nums.append(int(str_num[:-i]))  # drop i trailing digits
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers with more than 3 digits, both the leading and the
    trailing 3-digit chunks must themselves be prime.

    (The original was named `A` while callers use `validate`.)
    """
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Find the first `count` primes that stay prime under every left/right truncation.

    Starts at 13 (the single-digit primes 2, 3, 5, 7 are excluded by convention) and
    steps by 2, since even numbers cannot be prime.
    (The original was named `A` while callers use `compute_truncated_primes`.)
    """
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Project Euler 37: sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(F'{sum(compute_truncated_primes(11)) = }')
| 165 | 0 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    """Copy generator weights from the original HiFi-GAN state dict into `hf_model`.

    The original read every checkpoint entry into a throwaway local, so nothing was
    actually loaded. Assumes the `SpeechTaHifiGan` module layout
    (conv_pre / upsampler / resblocks / conv_post) — confirm against modeling_speecht5.
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[F"""upsamples.{i}.1.bias"""]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original SpeechT5 HiFi-GAN checkpoint into a `SpeechTaHifiGan` model folder.

    (The original was named `_lowerCAmelCase` while the __main__ block calls
    `convert_hifigan_checkpoint`, and its locals were never bound.)
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    # stats.npy holds the per-dimension mean/scale used to de-normalize spectrograms.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # (The original bound the parser/args to throwaway names, so `parser`/`args` were undefined.)
    parser = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Declares which public names live in which submodule, so that heavy
# submodules (e.g. torch-backed modeling code) are only imported on demand.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: also expose the modeling objects lazily.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Single-submodule lazy init: only the processor class is exported.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    # Install the lazy proxy so the processor module is imported on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def one_pence() -> int:
    """There is exactly one way to make any non-negative amount using only 1p coins."""
    return 1


def two_pence(x: int) -> int:
    """Number of ways to make ``x`` pence with coins of value <= 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Number of ways to make ``x`` pence with coins of value <= 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Number of ways to make ``x`` pence with coins of value <= 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Number of ways to make ``x`` pence with coins of value <= 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Number of ways to make ``x`` pence with coins of value <= 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Number of ways to make ``x`` pence with coins of value <= 100p."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Number of ways to make ``x`` pence with coins of value <= 200p."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(pence: int = 200) -> int:
    """Project Euler 31: count the ways to make *pence* using UK coins (1p..£2)."""
    return two_pound(pence)


if __name__ == "__main__":
    print(solution(int(input().strip())))
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid 1 / (1 + e^-z); accepts scalars or numpy arrays."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Mean binary cross-entropy between predictions *h* and labels *y*."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of labels *y* given features *x* and *weights*."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic-regression weights by batch gradient descent.

    Args:
        alpha: learning rate.
        x: feature matrix of shape (n_samples, n_features).
        y: 0/1 label vector of length n_samples.
        max_iterations: number of gradient steps.

    Returns:
        The fitted weight vector (length n_features).
    """
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:

if __name__ == "__main__":
    # Demo: fit a 2-feature logistic regression on iris and plot the boundary.
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        """Probability of class 1 for each sample in *x* under the fitted model."""
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    # Evaluate the model on a grid to draw the p=0.5 decision contour.
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule -> exported names; used by _LazyModule for lazy imports.
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: also expose the modeling objects lazily.
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    # Install the lazy proxy module at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the canonical anagram signature of *word*: its characters sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known word sharing *my_word*'s signature (module-level index)."""
    return word_by_signature[signature(my_word)]
# Load the word list shipped next to this script, lowercased and deduplicated.
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

# Index words by their sorted-character signature; anagrams share a bucket.
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> optional activation: the basic RegNet building block."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,  # "same" padding for odd kernel sizes
            groups=groups,
            bias=False,  # bias is redundant before batch norm
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # activation=None yields an identity (used for the last conv in a residual branch)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet stem: embeds pixel values with a stride-2 3x3 convolution."""

    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        # Fail early on a channel mismatch instead of a cryptic conv error.
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """Residual shortcut: 1x1 convolution + batch norm to match channels/stride."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation: reweight channels by a pooled, gated attention map."""

    def __init__(self, in_channels, reduced_channels):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: 1x1 -> grouped 3x3 -> 1x1, with a residual connection."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        # A projection shortcut is only needed when the shape changes.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block inserted."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A stack of X or Y layers; only the first layer downsamples (stride)."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Sequence of RegNet stages; optionally collects per-stage hidden states."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage
        # may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """Base class wiring RegNet into the Transformers save/load machinery."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convs (ReLU networks); unit-scale batch/group norms.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    """Embedder + encoder + global average pooler, no task head."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    """RegNet backbone plus a linear classification head on the pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label count/dtype, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def fracKnapsack(vl, wt, w, n):
    """Greedy fractional knapsack.

    Args:
        vl: item values.
        wt: item weights (parallel to *vl*).
        w: knapsack capacity.
        n: number of items.

    Returns:
        The maximum value obtainable, taking a fraction of the first item
        that no longer fits whole.

    >>> fracKnapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort items by value/weight ratio, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # cumulative weight after each whole item
    k = bisect(acc, w)  # how many whole items fit
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


# Backwards-compatible alias for the previous export name.
UpperCAmelCase_ = fracKnapsack


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
# Mapping from plain characters to International Morse Code.
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

# Inverse mapping, used for decoding.
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encode *message* (case-insensitive) into space-separated Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode a space-separated Morse string back into plain (uppercase) text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Round-trip a demo message through encrypt/decrypt, printing each step."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
from scipy.stats import pearsonr
import datasets
UpperCAmelCase : List[Any] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
UpperCAmelCase : str = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
UpperCAmelCase : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Pearson correlation coefficient metric (optionally with its p-value)."""

    def _info(self):
        # Metric metadata consumed by the `datasets` framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # scipy returns (correlation, p-value); expose the p-value on request.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Map of submodule -> exported names; used by _LazyModule for lazy imports.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # vision extras present: expose the (deprecated) feature extractor and image processor.
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch present: expose the modeling objects.
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy module at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowerCamelCase ( SCREAMING_SNAKE_CASE = "" ):
'''simple docstring'''
__UpperCamelCase :Dict = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
__UpperCamelCase :Any = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE ).text , '''html.parser''' )
__UpperCamelCase :List[Any] = soup.find_all('''td''' , attrs='''titleColumn''' )
__UpperCamelCase :Union[str, Any] = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
}
def lowerCamelCase ( SCREAMING_SNAKE_CASE = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
__UpperCamelCase :Any = get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE , '''w''' , newline='''''' ) as out_file:
__UpperCamelCase :int = csv.writer(SCREAMING_SNAKE_CASE )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 363 | from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase(model, config, tf_weights=None):
    """Build a map from TensorFlow MobileNetV1 variable names to PyTorch parameters.

    Args:
        model: a MobileNetV1 backbone, or a classification model wrapping one in
            its ``mobilenet_va`` attribute.
        config: model configuration (unused here, kept for signature compatibility
            with the weight-loading hook).
        tf_weights: optional dict of loaded TF arrays (unused here).

    Returns:
        dict[str, torch.nn.Parameter]: TF variable name -> PyTorch parameter/buffer.

    Bug fixes vs. the mangled original: the map entries were all assigned to a
    throwaway local (the dict stayed empty) and the function returned an undefined
    name; the model/backbone dispatch used ``isinstance(x, x)``.
    """
    tf_to_pt_map = {}

    # The classification wrapper keeps the backbone under `mobilenet_va`;
    # a bare backbone is used directly.
    backbone = model.mobilenet_va if hasattr(model, "mobilenet_va") else model

    # Stem convolution + its batch norm.
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    # 13 depthwise-separable blocks: TF block i maps to PyTorch layers 2i (depthwise)
    # and 2i+1 (pointwise).
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    # Classification head, only present on the wrapper model.
    if hasattr(model, "classifier"):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def lowerCamelCase(model, config, tf_checkpoint_path):
    """Load a TensorFlow MobileNetV1 checkpoint into a PyTorch model in place.

    Args:
        model: the target PyTorch model.
        config: the model configuration (forwarded to the name-map builder).
        tf_checkpoint_path: path to the TF checkpoint.

    Returns:
        The same ``model``, with weights copied from the checkpoint.

    Raises:
        ImportError: if TensorFlow is not installed.
        ValueError: if a TF array's shape does not match the PyTorch parameter.

    Bug fixes vs. the mangled original: loaded arrays were discarded instead of
    stored in ``tf_weights``; the converted tensor was never assigned to the
    parameter; the ``pop`` calls used mangled arguments.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.'''
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"""Loading TF weight {name} with shape {shape}""")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"""Importing {name}""")
        if name not in tf_weights:
            logger.info(f"""{name} not in tf pre-trained weights, skipping""")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''')
            # Reorder TF depthwise kernel axes to PyTorch's layout.
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('''Transposing''')
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""")
        logger.info(f"""Initialize PyTorch weight {name} {array.shape}""")
        pointer.data = torch.from_numpy(array)
        # Drop the variable and its optimizer slot variables so only genuinely
        # unused weights remain for the final report.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '''/RMSProp''', None)
        tf_weights.pop(name + '''/RMSProp_1''', None)
        tf_weights.pop(name + '''/ExponentialMovingAverage''', None)
    logger.info(f"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""")
    return model
def lowerCamelCase(features, conv_layer):
    """Apply TensorFlow-style "SAME" padding to the input of a Conv2d layer.

    Args:
        features: input tensor whose last two dims are (height, width).
        conv_layer: the nn.Conv2d whose stride/kernel_size determine the padding.

    Returns:
        ``features`` zero-padded so the convolution covers it like TF "SAME" does
        (asymmetric: the extra pixel, if any, goes to the right/bottom).

    Bug fix vs. the mangled original: the parameters shared one name (a
    SyntaxError) and the body read undefined locals.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, '''constant''', 0.0)
class lowerCamelCase_(nn.Module):
    """MobileNetV1 building block: convolution -> optional batch norm -> optional activation."""

    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""")
        if out_channels % groups != 0:
            raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""")

        # With TF-style padding, explicit asymmetric padding is applied in
        # forward() instead of letting the convolution pad symmetrically.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        # Bug fix: the mangled original referenced the nonexistent nn.Convad /
        # nn.BatchNormad; they stood for nn.Conv2d / nn.BatchNorm2d.
        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode='''zeros''',
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # A string selects from the activation registry; otherwise fall back
            # to the config's activation (string name or callable module).
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        # Bug fix: naming this method `forward` (instead of the mangled
        # UpperCamelCase__) lets nn.Module.__call__ dispatch to it.
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features

    # Backward-compatible alias for the pre-cleanup method name.
    UpperCamelCase__ = forward
# Pre-trained-model base class for MobileNetV1 (UpperCAmelCase_ is the mangled
# name of the imported PreTrainedModel). Declares config/weight-loading hooks
# and the per-module weight initializer.
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''
    # NOTE(review): every class attribute below is bound to the same mangled name
    # `a__`, so each assignment overwrites the previous one; pre-mangling these
    # were presumably config_class, load_tf_weights, base_model_prefix,
    # main_input_name and supports_gradient_checkpointing — confirm.
    a__ : List[str] = MobileNetVaConfig
    # NOTE(review): load_tf_weights_in_mobilenet_va is not defined under that
    # name anywhere in this file.
    a__ : Dict = load_tf_weights_in_mobilenet_va
    a__ : Tuple = """mobilenet_v1"""
    a__ : Optional[Any] = """pixel_values"""
    a__ : int = False

    # Weight-initialization hook applied module by module.
    def UpperCamelCase__ ( self , __lowercase) -> None:
        # NOTE(review): nn.Convad / nn.BatchNormad do not exist in torch.nn
        # (presumably nn.Conv2d / nn.BatchNorm2d before mangling), and the body
        # reads `module` while the parameter is named `__lowercase` — confirm
        # and restore before use.
        if isinstance(__lowercase , (nn.Linear, nn.Convad)):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(__lowercase , nn.BatchNormad):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# NOTE(review): both raw-string docstrings below are assigned to the same mangled
# name `__lowercase`, so the second overwrites the first; pre-mangling these were
# presumably the START_DOCSTRING and INPUTS_DOCSTRING constants referenced by the
# decorators further down — confirm and restore distinct names.
__lowercase = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowercase = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
# Bare MobileNetV1 backbone: stem conv + 13 depthwise/pointwise pairs + optional
# global average pooling. NOTE(review): name mangling left this class broken —
# duplicate parameter names (a SyntaxError), locals assigned to a throwaway name
# while later lines read the originals (config, depth, out_channels, strides,
# hidden_states, ...), and the forward method named UpperCamelCase__ so
# nn.Module.__call__ cannot dispatch to it. Restore before use.
@add_start_docstrings(
    """The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , UpperCAmelCase_ , )
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    def __init__( self , __lowercase , __lowercase = True) -> Optional[Any]:
        super().__init__(__lowercase)
        __UpperCamelCase :List[str] = config
        # Stem starts at depth 32, scaled by the configured depth multiplier.
        __UpperCamelCase :Any = 32
        __UpperCamelCase :List[str] = max(int(depth * config.depth_multiplier) , config.min_depth)
        __UpperCamelCase :Union[str, Any] = MobileNetVaConvLayer(
            __lowercase , in_channels=config.num_channels , out_channels=__lowercase , kernel_size=3 , stride=2 , )
        # Per-block strides for the 13 depthwise stages.
        __UpperCamelCase :str = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        __UpperCamelCase :Any = nn.ModuleList()
        for i in range(13):
            __UpperCamelCase :str = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                __UpperCamelCase :Tuple = max(int(depth * config.depth_multiplier) , config.min_depth)
            # Depthwise 3x3 followed by pointwise 1x1.
            self.layer.append(
                MobileNetVaConvLayer(
                    __lowercase , in_channels=__lowercase , out_channels=__lowercase , kernel_size=3 , stride=strides[i] , groups=__lowercase , ))
            self.layer.append(
                MobileNetVaConvLayer(
                    __lowercase , in_channels=__lowercase , out_channels=__lowercase , kernel_size=1 , ))
        __UpperCamelCase :str = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    # Head-pruning hook; not supported for this architecture.
    def UpperCamelCase__ ( self , __lowercase) -> Union[str, Any]:
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(__lowercase)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def UpperCamelCase__ ( self , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        __UpperCamelCase :Union[str, Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __UpperCamelCase :str = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''')
        __UpperCamelCase :int = self.conv_stem(__lowercase)
        __UpperCamelCase :List[str] = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            __UpperCamelCase :Optional[Any] = layer_module(__lowercase)
            if output_hidden_states:
                __UpperCamelCase :int = all_hidden_states + (hidden_states,)
        __UpperCamelCase :Any = hidden_states
        if self.pooler is not None:
            # Flatten the pooled (B, C, 1, 1) feature map to (B, C).
            __UpperCamelCase :str = torch.flatten(self.pooler(__lowercase) , start_dim=1)
        else:
            __UpperCamelCase :Tuple = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=__lowercase , pooler_output=__lowercase , hidden_states=__lowercase , )
# MobileNetV1 with an image-classification head: backbone -> dropout -> linear
# classifier, with the standard HF problem-type dispatch for the loss.
# NOTE(review): mangling left this class broken too — locals are assigned to a
# throwaway name while later lines read the originals (config, outputs, logits,
# labels, loss, loss_fct, output), and forward is named UpperCamelCase__.
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , UpperCAmelCase_ , )
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    def __init__( self , __lowercase) -> None:
        super().__init__(__lowercase)
        __UpperCamelCase :int = config.num_labels
        __UpperCamelCase :Optional[int] = MobileNetVaModel(__lowercase)
        # The classifier input width is the last pointwise conv's output channels.
        __UpperCamelCase :Optional[Any] = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        __UpperCamelCase :str = nn.Dropout(config.classifier_dropout_prob , inplace=__lowercase)
        __UpperCamelCase :Dict = nn.Linear(__lowercase , config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(__lowercase)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def UpperCamelCase__ ( self , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        __UpperCamelCase :List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        __UpperCamelCase :Tuple = self.mobilenet_va(__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase)
        __UpperCamelCase :List[str] = outputs.pooler_output if return_dict else outputs[1]
        __UpperCamelCase :Union[str, Any] = self.classifier(self.dropout(__lowercase))
        __UpperCamelCase :int = None
        if labels is not None:
            # Infer the problem type once from label count/dtype if unset.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __UpperCamelCase :Tuple = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __UpperCamelCase :Union[str, Any] = '''single_label_classification'''
                else:
                    __UpperCamelCase :Optional[Any] = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                __UpperCamelCase :Any = MSELoss()
                if self.num_labels == 1:
                    __UpperCamelCase :List[str] = loss_fct(logits.squeeze() , labels.squeeze())
                else:
                    __UpperCamelCase :Dict = loss_fct(__lowercase , __lowercase)
            elif self.config.problem_type == "single_label_classification":
                __UpperCamelCase :Optional[int] = CrossEntropyLoss()
                __UpperCamelCase :str = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                __UpperCamelCase :Dict = BCEWithLogitsLoss()
                __UpperCamelCase :List[str] = loss_fct(__lowercase , __lowercase)
        if not return_dict:
            __UpperCamelCase :Tuple = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states , )
# --- separator artifact between concatenated source segments ---
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
# Maps an original EfficientFormer state-dict key to the HF-converted key.
# NOTE(review): name mangling destroyed this function — the two parameters share
# one name (a SyntaxError) and the body mixes undefined names (old_name, layer,
# match, trimmed_name, new_name, layer_index) with mangled ones; pre-mangling
# this was presumably rename_key(old_name, num_meta4D_last_stage) — confirm
# against the upstream conversion script before use.
def _lowercase ( lowercase__ , lowercase__ ):
    __lowerCAmelCase : Optional[int] = old_name
    if "patch_embed" in old_name:
        # patch_embed submodules 0/1/3/4 map to convolution1 / batchnorm_before /
        # convolution2 / batchnorm_after.
        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Tuple = old_name.split('''.''' )
        if layer == "0":
            __lowerCAmelCase : int = old_name.replace('''0''' , '''convolution1''' )
        elif layer == "1":
            __lowerCAmelCase : Union[str, Any] = old_name.replace('''1''' , '''batchnorm_before''' )
        elif layer == "3":
            __lowerCAmelCase : List[str] = old_name.replace('''3''' , '''convolution2''' )
        else:
            __lowerCAmelCase : Optional[int] = old_name.replace('''4''' , '''batchnorm_after''' )
    if "network" in old_name and re.search(r'''\d\.\d''' , lowercase__ ):
        # Stage/block indices may be one or two digits; pick the matching pattern.
        __lowerCAmelCase : List[Any] = r'''\b\d{2}\b'''
        if bool(re.search(lowercase__ , lowercase__ ) ):
            __lowerCAmelCase : Dict = re.search(r'''\d\.\d\d.''' , lowercase__ ).group()
        else:
            __lowerCAmelCase : Optional[Any] = re.search(r'''\d\.\d.''' , lowercase__ ).group()
        if int(match[0] ) < 6:
            # Stages 0-5 are intermediate meta4D stages.
            __lowerCAmelCase : Optional[Any] = old_name.replace(lowercase__ , '''''' )
            __lowerCAmelCase : Union[str, Any] = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
            __lowerCAmelCase : int = '''intermediate_stages.''' + trimmed_name
        else:
            # Stage 6 is the last stage: meta4D blocks first, then meta3D blocks.
            __lowerCAmelCase : Optional[int] = old_name.replace(lowercase__ , '''''' )
            if int(match[2] ) < num_meta4D_last_stage:
                __lowerCAmelCase : List[str] = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
            else:
                __lowerCAmelCase : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
                __lowerCAmelCase : Union[str, Any] = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
            if "norm1" in old_name:
                __lowerCAmelCase : Optional[Any] = trimmed_name.replace('''norm1''' , '''layernorm1''' )
            elif "norm2" in old_name:
                __lowerCAmelCase : int = trimmed_name.replace('''norm2''' , '''layernorm2''' )
            elif "fc1" in old_name:
                __lowerCAmelCase : Tuple = trimmed_name.replace('''fc1''' , '''linear_in''' )
            elif "fc2" in old_name:
                __lowerCAmelCase : Optional[int] = trimmed_name.replace('''fc2''' , '''linear_out''' )
            __lowerCAmelCase : str = '''last_stage.''' + trimmed_name
    elif "network" in old_name and re.search(r'''.\d.''' , lowercase__ ):
        __lowerCAmelCase : Any = old_name.replace('''network''' , '''intermediate_stages''' )
    # Generic renames applied to whatever `new_name` holds at this point.
    if "fc" in new_name:
        __lowerCAmelCase : Union[str, Any] = new_name.replace('''fc''' , '''convolution''' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        __lowerCAmelCase : Any = new_name.replace('''norm1''' , '''batchnorm_before''' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        __lowerCAmelCase : Optional[Any] = new_name.replace('''norm2''' , '''batchnorm_after''' )
    if "proj" in new_name:
        __lowerCAmelCase : str = new_name.replace('''proj''' , '''projection''' )
    if "dist_head" in new_name:
        __lowerCAmelCase : Optional[int] = new_name.replace('''dist_head''' , '''distillation_classifier''' )
    elif "head" in new_name:
        __lowerCAmelCase : Optional[int] = new_name.replace('''head''' , '''classifier''' )
    elif "patch_embed" in new_name:
        __lowerCAmelCase : str = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        __lowerCAmelCase : Optional[int] = new_name.replace('''norm''' , '''layernorm''' )
        __lowerCAmelCase : List[Any] = '''efficientformer.''' + new_name
    else:
        __lowerCAmelCase : int = '''efficientformer.encoder.''' + new_name
    return new_name
# Renames every key of a torch state dict in place and returns it.
def _lowercase ( lowercase__ , lowercase__ ):
    for key in checkpoint.copy().keys():
        # NOTE(review): mangling destroyed this body — `checkpoint` is read while
        # the parameters are named `lowercase__`, and the popped value is never
        # stored back under its renamed key. Pre-mangling this presumably did
        # checkpoint[rename_key(key, num_meta4D_last_stage)] = val — confirm.
        __lowerCAmelCase : Any = checkpoint.pop(lowercase__ )
        __lowerCAmelCase : Tuple = val
    return checkpoint
def _lowercase():
    """Download the standard COCO test image used for conversion sanity checks.

    Returns:
        PIL.Image.Image: the decoded test image.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # Bug fix: the mangled original read an undefined name for the URL and passed
    # it as the `stream` flag; stream=True keeps response.raw readable by PIL.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
# Converts an original EfficientFormer checkpoint to the HF format, verifies the
# logits on a reference image, saves, and optionally pushes to the Hub.
# NOTE(review): mangling destroyed this function — all four parameters share one
# name (a SyntaxError) and the body mixes undefined names (checkpoint_path,
# config, model, model_name, pillow_resamplings, image, image_size, crop_size,
# processor, original_pixel_values, outputs, logits, expected_shape,
# expected_logits, pytorch_dump_path, push_to_hub) with mangled ones. The
# pre-mangled signature was presumably convert_efficientformer_checkpoint(
# checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub).
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    __lowerCAmelCase : Optional[int] = torch.load(lowercase__ , map_location='''cpu''' )['''model''']
    __lowerCAmelCase : Union[str, Any] = EfficientFormerConfig.from_json_file(lowercase__ )
    __lowerCAmelCase : Dict = EfficientFormerForImageClassificationWithTeacher(lowercase__ )
    # Model variant (e.g. "efficientformer_l1") derived from the checkpoint filename.
    __lowerCAmelCase : Optional[Any] = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    __lowerCAmelCase : int = config.depths[-1] - config.num_metaad_blocks + 1
    __lowerCAmelCase : Optional[Any] = convert_torch_checkpoint(lowercase__ , lowercase__ )
    model.load_state_dict(lowercase__ )
    model.eval()
    __lowerCAmelCase : int = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    # prepare image
    __lowerCAmelCase : List[str] = prepare_img()
    __lowerCAmelCase : int = 2_5_6
    __lowerCAmelCase : Optional[Any] = 2_2_4
    __lowerCAmelCase : Tuple = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
    __lowerCAmelCase : List[str] = processor(images=lowercase__ , return_tensors='''pt''' ).pixel_values
    # original processing pipeline
    __lowerCAmelCase : List[str] = Compose(
        [
            Resize(lowercase__ , interpolation=pillow_resamplings['''bicubic'''] ),
            CenterCrop(lowercase__ ),
            ToTensor(),
            Normalize(lowercase__ , lowercase__ ),
        ] )
    __lowerCAmelCase : List[str] = image_transforms(lowercase__ ).unsqueeze(0 )
    # Sanity check: HF processor must reproduce the original torchvision pipeline.
    assert torch.allclose(lowercase__ , lowercase__ )
    __lowerCAmelCase : int = model(lowercase__ )
    __lowerCAmelCase : Any = outputs.logits
    __lowerCAmelCase : List[str] = (1, 1_0_0_0)
    # Per-variant reference logits for the first 10 classes.
    if "l1" in model_name:
        __lowerCAmelCase : Optional[int] = torch.Tensor(
            [-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
        assert torch.allclose(logits[0, :1_0] , lowercase__ , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        __lowerCAmelCase : str = torch.Tensor(
            [-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
        assert torch.allclose(logits[0, :1_0] , lowercase__ , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        __lowerCAmelCase : Optional[int] = torch.Tensor(
            [-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
    model.save_pretrained(lowercase__ )
    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(lowercase__ )
    print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print('''Pushing model to the hub...''' )
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add model''' , use_temp_dir=lowercase__ , )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add image processor''' , use_temp_dir=lowercase__ , )
# CLI entry point: parse arguments and run the EfficientFormer conversion.
if __name__ == "__main__":
    # NOTE(review): the parser is bound to the mangled name `_UpperCamelCase`
    # while the add_argument calls below read `parser`, and parse_args() rebinds
    # the same mangled name while the final call reads `args` — restore the
    # pre-mangled names before running.
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    # Paired negation flag; the default below makes pushing opt-out.
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)
    _UpperCamelCase = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
# --- separator artifact between concatenated source segments ---
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
def _lowercase(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute the (height, width) a DPT input should be resized to.

    Args:
        input_image: image whose current size is read via get_image_size.
        output_size: target size, an int (square) or a (height, width) pair.
        keep_aspect_ratio: when True, use a single scale factor (the one closer
            to 1) for both dimensions so the aspect ratio is preserved.
        multiple: both output dimensions are constrained to a multiple of this.

    Returns:
        tuple[int, int]: the (new_height, new_width) to resize to.

    Bug fixes vs. the mangled original: all four parameters shared one name
    (a SyntaxError), the int check read ``isinstance(x, x)``, and several locals
    were assigned to a throwaway name while later lines read the originals.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then correct downward/upward so the
        # result respects max_val / min_val.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
# DPT-style image processor: resize (with aspect-ratio / multiple-of constraints),
# rescale, normalize, and post-process semantic-segmentation logits.
# NOTE(review): mangling left this class broken — every __init__/method parameter
# shares the name `A_` (a SyntaxError), attributes are assigned to a throwaway
# local while later lines read the originals (size, do_resize, image_mean, ...),
# and all five methods are named UpperCamelCase__ so later defs shadow earlier
# ones. Restore distinct names before use.
class __lowercase (_UpperCAmelCase ):
    _UpperCamelCase = ["""pixel_values"""]

    def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = False , A_ = 1 , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) ->None:
        '''simple docstring'''
        super().__init__(**A_ )
        __lowerCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384}
        __lowerCAmelCase : Dict = get_size_dict(A_ )
        __lowerCAmelCase : Optional[Any] = do_resize
        __lowerCAmelCase : int = size
        __lowerCAmelCase : Dict = keep_aspect_ratio
        __lowerCAmelCase : List[Any] = ensure_multiple_of
        __lowerCAmelCase : Tuple = resample
        __lowerCAmelCase : Dict = do_rescale
        __lowerCAmelCase : Any = rescale_factor
        __lowerCAmelCase : List[Any] = do_normalize
        __lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    # Resize one image, delegating the target-size computation to the helper above.
    def UpperCamelCase__ ( self , A_ , A_ , A_ = False , A_ = 1 , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) ->np.ndarray:
        '''simple docstring'''
        __lowerCAmelCase : int = get_size_dict(A_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        __lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(
            A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , )
        return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )

    # Rescale pixel values by a scalar factor.
    def UpperCamelCase__ ( self , A_ , A_ , A_ = None , **A_ , ) ->Dict:
        '''simple docstring'''
        return rescale(A_ , scale=A_ , data_format=A_ , **A_ )

    # Normalize with per-channel mean/std.
    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ = None , **A_ , ) ->np.ndarray:
        '''simple docstring'''
        return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )

    # Full preprocessing pipeline over a batch of images.
    def UpperCamelCase__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) ->PIL.Image.Image:
        '''simple docstring'''
        __lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
        __lowerCAmelCase : Optional[int] = size if size is not None else self.size
        __lowerCAmelCase : Union[str, Any] = get_size_dict(A_ )
        __lowerCAmelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        __lowerCAmelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        __lowerCAmelCase : Tuple = resample if resample is not None else self.resample
        __lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
        __lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
        __lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
        __lowerCAmelCase : Optional[Any] = make_list_of_images(A_ )
        if not valid_images(A_ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        __lowerCAmelCase : Any = [to_numpy_array(A_ ) for image in images]
        if do_resize:
            __lowerCAmelCase : Optional[Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
        if do_rescale:
            __lowerCAmelCase : Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]
        if do_normalize:
            __lowerCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
        __lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
        __lowerCAmelCase : Dict = {'''pixel_values''': images}
        return BatchFeature(data=A_ , tensor_type=A_ )

    # Convert raw segmentation logits into per-image class maps, optionally
    # resized to the provided target sizes.
    def UpperCamelCase__ ( self , A_ , A_ = None ) ->Any:
        '''simple docstring'''
        __lowerCAmelCase : Any = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(A_ ) != len(A_ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(A_ ):
                __lowerCAmelCase : Optional[int] = target_sizes.numpy()
            __lowerCAmelCase : List[str] = []
            for idx in range(len(A_ ) ):
                __lowerCAmelCase : Any = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
                __lowerCAmelCase : str = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(A_ )
        else:
            __lowerCAmelCase : Any = logits.argmax(dim=1 )
            __lowerCAmelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
# --- separator artifact between concatenated source segments ---
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
# Test helper that builds small RegNet configs/inputs and checks model outputs.
# NOTE(review): mangling left this class broken — every attribute is assigned to
# the same throwaway name `A_` while methods read the originals (self.batch_size,
# self.num_labels, ...), `_a` is undefined, mutable list defaults are shared
# across calls, and all five methods are named SCREAMING_SNAKE_CASE so later
# defs shadow earlier ones. Restore distinct names before use.
class __magic_name__ :
    """simple docstring"""

    def __init__( self :Optional[int] , snake_case :int , snake_case :List[str]=3 , snake_case :str=32 , snake_case :Optional[Any]=3 , snake_case :Dict=10 , snake_case :Optional[int]=[10, 20, 30, 40] , snake_case :Optional[int]=[1, 1, 2, 1] , snake_case :List[Any]=True , snake_case :Any=True , snake_case :str="relu" , snake_case :int=3 , snake_case :Any=None , ):
        '''simple docstring'''
        A_ : Optional[Any] = parent
        A_ : Any = batch_size
        A_ : Any = image_size
        A_ : int = num_channels
        A_ : int = embeddings_size
        A_ : List[Any] = hidden_sizes
        A_ : List[Any] = depths
        A_ : int = is_training
        A_ : Union[str, Any] = use_labels
        A_ : Any = hidden_act
        A_ : Tuple = num_labels
        A_ : List[Any] = scope
        A_ : str = len(_a )

    # Builds (config, pixel_values, labels) for a test run.
    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''simple docstring'''
        A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A_ : Any = None
        if self.use_labels:
            A_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
        A_ : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    # Small RegNetConfig assembled from the tester's attributes.
    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''simple docstring'''
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    # Checks the bare model's last-hidden-state shape.
    def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Tuple , snake_case :Dict , snake_case :Any ):
        '''simple docstring'''
        A_ : Dict = RegNetModel(config=_a )
        model.to(_a )
        model.eval()
        A_ : Union[str, Any] = model(_a )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    # Checks the classification head's logits shape.
    def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Any ):
        '''simple docstring'''
        A_ : int = self.num_labels
        A_ : Tuple = RegNetForImageClassification(_a )
        model.to(_a )
        model.eval()
        A_ : Optional[int] = model(_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Repackages the prepared inputs into the (config, inputs_dict) pair the
    # common test mixin expects.
    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''simple docstring'''
        A_ : Optional[Any] = self.prepare_config_and_inputs()
        A_ : Union[str, Any] = config_and_inputs
        A_ : Any = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for RegNet.

    RegNet is a convolutional backbone: it has no inputs_embeds, no attention
    masks and no head masking, hence the skipped mixin tests below.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE: the original block bound four different mixin switches to the same
    # mangled name `__UpperCamelCase`; restored to distinct flags.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # RegNetConfig has no text-modality properties to check.
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # normalization layers must start at identity (weight=1, bias=0)
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # one feature map per stage, plus the stem output
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test below."""
    # NOTE: the original bound the image to a throwaway local `A_` and returned
    # the undefined name `image`; it was also named `__snake_case` while the
    # integration test calls `prepare_img()`.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check: run the first pretrained checkpoint on a real image."""

    @cached_property
    def default_image_processor(self):
        # image processor matching the first pretrained checkpoint; None when
        # vision dependencies are unavailable
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 362 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule to the public names it provides.
# NOTE: the original block bound this dict (and the modeling list, and the
# _LazyModule instance) to the same throwaway name `_lowerCAmelCase`, so
# `_import_structure` was undefined and the lazy module was never installed.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    """Tests SamProcessor against its underlying SamImageProcessor (PyTorch path)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """Reload the saved image processor, optionally overriding attributes via kwargs."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (HWC, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # malformed masks must raise
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    """Tests SamProcessor against its underlying SamImageProcessor (TensorFlow path)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """Reload the saved image processor, optionally overriding attributes via kwargs."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (HWC, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # malformed masks must raise inside the TF graph ops
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    """Cross-framework checks: PyTorch and TensorFlow paths must produce identical results."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """Reload the saved image processor, optionally overriding attributes via kwargs."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (HWC, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_processor, tf_input_processor))
| 37 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """
    Processor combining an image processor and a tokenizer into a single object.

    `__call__` forwards images to the image processor and text to the tokenizer;
    `token2json` converts a generated Donut token sequence into a nested dict.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # accept the deprecated `feature_extractor` kwarg as an alias
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process `images` and/or `text`; when both are given, attach tokenized text as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # first positional argument is treated as images
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route `__call__` to the tokenizer for label processing."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """
        Convert a Donut output token sequence (e.g. ``<s_name>John</s_name>``) into
        a nested dict / list structure. Returns ``{"text_sequence": tokens}`` when no
        structured field is found at the top level.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # unmatched opening tag: drop it and continue
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 120 | 0 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# The conversion relies on the fairseq >= 1.0 checkpoint/config layout.
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sentence used to verify the converted model reproduces fairseq's outputs.
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """
    Copy the weights of a fairseq XLM-RoBERTa-XL checkpoint into our transformers
    model, verify both models produce the same outputs, and save the result.

    NOTE(review): the original block declared three parameters all named
    `_SCREAMING_SNAKE_CASE` (a SyntaxError) and bound every weight copy to the
    same local `UpperCamelCase`, so nothing was actually transferred; the
    assignment targets below are restored from the surrounding comments/asserts —
    confirm against the upstream conversion script.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE: the original bound both the parser and the parsed args to the
    # throwaway name `lowerCAmelCase__`, so `parser.add_argument`, `args.*` and
    # the function call all hit undefined names; restored below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 244 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
# BibTeX citations for the chrF / chrF++ papers and for sacrebleu.
# These module constants are referenced by the metric class decorator below;
# the original code bound all three strings to the same throwaway name.
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''

# Human-readable description of the metric.
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''

# Arguments/returns/examples documentation injected into the metric docstring.
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
    \'score\' (float): The chrF (chrF++) score,
    \'char_order\' (int): The character n-gram order,
    \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    \'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2)
        >>> print(results)
        {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2,
        ...                         lowercase=True)
        >>> print(results)
        {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowerCamelCase(datasets.Metric):
    """ChrF(++) metric backed by the sacrebleu implementation.

    NOTE: the methods must be named `_info` / `_compute` so that the
    `datasets.Metric` framework actually calls them.
    """

    def _info(self):
        # `eps_smoothing` requires sacrebleu >= 1.4.12; fail early with a
        # clear message instead of a confusing TypeError later.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        """Compute the chrF(++) corpus score; see _KWARGS_DESCRIPTION for args."""
        # sacrebleu expects the references transposed: one sub-list per
        # reference *position*, not one sub-list per prediction.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 244 | 1 |
"""simple docstring"""
# (arabic value, roman symbol) pairs, largest first, including the
# subtractive forms (CM, CD, XC, XL, IX, IV).
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer.

    A symbol smaller than its successor forms a subtractive pair (IV, IX, ...)
    and is consumed two symbols at a time.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to its Roman numeral representation."""
    result = []
    for arabic, roman in ROMAN:
        # How many copies of this symbol fit, and what remains to encode.
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 289 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build a `LevitImageProcessor` in the tests.

    Named `LevitImageProcessingTester` because the test class below
    instantiates it under that name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # `None` defaults avoid mutable default arguments; the effective
        # defaults are unchanged.
        self.size = size if size is not None else {"shortest_edge": 18}
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_center_crop = do_center_crop
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `LevitImageProcessor`: attribute presence, dict round-trip,
    and encoding of PIL / numpy / torch image inputs."""

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        # Overrides passed as kwargs must win over the dict values.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def _check_shapes(self, image_processing, image_inputs):
        # Shared shape assertions for the single-image and batched cases.
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        self._check_shapes(image_processing, image_inputs)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        self._check_shapes(image_processing, image_inputs)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        self._check_shapes(image_processing, image_inputs)
| 324 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """A minimal dense matrix supporting +, -, scalar/matrix *, transpose and
    the Sherman-Morrison rank-one inverse update."""

    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row = row
        self.column = column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Pad every entry to the widest element so columns line up.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True iff `loc` is a 2-tuple/list inside the matrix bounds."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^-1 given that `self` is A^-1.

        `u` and `v` must be column vectors of matching size. Returns None when
        the updated matrix is not invertible (1 + v^T A^-1 u == 0).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 355 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True iff giving `color` to the current vertex conflicts with no
    already-colored neighbour (adjacency encoded as a 0/1 row)."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to color vertices from `index` onward."""
    # Base Case: every vertex has been assigned a color.
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid vertex coloring of `graph` (adjacency matrix) using at
    most `max_colors` colors, or [] when none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 4 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__lowerCamelCase : Dict = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification.

    Attributes:
        guid: Unique id for the example.
        words: The words of the sequence.
        labels: The labels for each word; set for train/dev, None for test.
    """

    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of tokenized features; field names match model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    """Dataset split identifiers; `value` is the split's file prefix."""

    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    """Abstract task: subclasses define how to read examples/labels; the
    featurization into model inputs is shared."""

    @staticmethod
    def read_examples_from_file(data_dir, mode):
        """Read `InputExample`s for the given split; must be overridden."""
        raise NotImplementedError

    @staticmethod
    def get_labels(path):
        """Return the list of label strings; must be overridden."""
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples,
        label_list,
        max_seq_length,
        tokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ):
        """Tokenize examples and pad/truncate them into `InputFeatures`.

        Only the first sub-token of each word keeps the real label id; the
        rest receive `pad_token_label_id` so they are ignored by the loss.
        """
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # "type_ids" indicate the first vs. second sequence; the vectors
            # for type 0 and 1 were learned during pre-training.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset):
    """PyTorch dataset that featurizes examples once and caches them on disk."""

    features: List[InputFeatures]
    # Use the cross-entropy ignore index as padding label id so that only
    # real label ids contribute to the loss later.
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task,
        data_dir,
        tokenizer,
        labels,
        model_type,
        max_seq_length=None,
        overwrite_cache=False,
        mode=Split.train,
    ):
        # Load data features from cache or dataset file.
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["xlnet"]),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                    sep_token=tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(tokenizer.padding_side == "left"),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f"Saving features into cached file {cached_features_file}")
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    """TensorFlow variant: wraps the featurized examples in a `tf.data.Dataset`."""

    features: List[InputFeatures]
    # Same ignore index convention as the torch dataset.
    pad_token_label_id: int = -100

    def __init__(
        self,
        token_classification_task,
        data_dir,
        tokenizer,
        labels,
        model_type,
        max_seq_length=None,
        overwrite_cache=False,
        mode=Split.train,
    ):
        examples = token_classification_task.read_examples_from_file(data_dir, mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples,
            labels,
            max_seq_length,
            tokenizer,
            cls_token_at_end=bool(model_type in ["xlnet"]),
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=False,
            pad_on_left=bool(tokenizer.padding_side == "left"),
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

        def gen():
            # Yield (inputs, labels) pairs; token_type_ids only when present.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                (
                    {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                    tf.TensorShape([None]),
                ),
            )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                (
                    {
                        "input_ids": tf.TensorShape([None]),
                        "attention_mask": tf.TensorShape([None]),
                        "token_type_ids": tf.TensorShape([None]),
                    },
                    tf.TensorShape([None]),
                ),
            )

    def get_dataset(self):
        # Attach the known cardinality so downstream `len()` on the tf dataset works.
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]
| 52 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__(DiffusionPipeline):
    """Unconditional audio generation pipeline (DanceDiffusion-style):
    denoises random noise with a 1D UNet and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s=None,
        return_dict: bool = True,
    ):
        """Generate `batch_size` audio samples of `audio_length_in_s` seconds.

        Returns an `AudioPipelineOutput` (or a 1-tuple when `return_dict` is
        False) holding a numpy array of shape (batch, channels, samples).
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        # The UNet halves the resolution once per up-block, so the sample
        # length must be a multiple of 2**num_up_blocks.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple; the surplus is cut off afterwards.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim back to the originally requested length.
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
| 52 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __magic_name__ :
def __init__( self , _a ) -> Optional[Any]:
lowerCAmelCase_ = data
lowerCAmelCase_ = [0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0]
@staticmethod
def __a ( _a , _a ) -> List[str]:
return ((n << b) | (n >> (32 - b))) & 0xffffffff
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64)
lowerCAmelCase_ = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
return padded_data
def __a ( self ) -> Tuple:
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __a ( self , _a ) -> Optional[Any]:
lowerCAmelCase_ = list(struct.unpack(">16L" , _a ) ) + [0] * 64
for i in range(16 , 80 ):
lowerCAmelCase_ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.padding()
lowerCAmelCase_ = self.split_blocks()
for block in self.blocks:
lowerCAmelCase_ = self.expand_block(_a )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
lowerCAmelCase_ = (b & c) | ((~b) & d)
lowerCAmelCase_ = 0x5a827999
elif 20 <= i < 40:
lowerCAmelCase_ = b ^ c ^ d
lowerCAmelCase_ = 0x6ed9eba1
elif 40 <= i < 60:
lowerCAmelCase_ = (b & c) | (b & d) | (c & d)
lowerCAmelCase_ = 0x8f1bbcdc
elif 60 <= i < 80:
lowerCAmelCase_ = b ^ c ^ d
lowerCAmelCase_ = 0xca62c1d6
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = (
self.rotate(_a , 5 ) + f + e + k + expanded_block[i] & 0xffffffff,
a,
self.rotate(_a , 30 ),
c,
d,
)
lowerCAmelCase_ = (
self.h[0] + a & 0xffffffff,
self.h[1] + b & 0xffffffff,
self.h[2] + c & 0xffffffff,
self.h[3] + d & 0xffffffff,
self.h[4] + e & 0xffffffff,
)
return ("{:08x}" * 5).format(*self.h )
def A():
    """Self-test: compare the pure-Python SHA-1 against hashlib's digest.

    NOTE(review): originally called undefined names ``SHAaHash`` and
    ``hashlib.shaa``; the class in this module is ``__magic_name__`` and the
    reference digest is ``hashlib.sha1``.  Also note this function is
    shadowed by the CLI entry point below, which shares the name ``A``.
    """
    lowerCAmelCase_ = b"Test String"
    assert __magic_name__(lowerCAmelCase_ ).final_hash() == hashlib.sha1(lowerCAmelCase_ ).hexdigest()  # noqa: S324
def A():
    """CLI entry point: hash a string (default) or a file's bytes with SHA-1.

    NOTE(review): locals were bound to ``lowerCAmelCase_`` while the code read
    ``parser`` / ``args`` / undefined names; identifiers restored so each
    value is actually used.
    """
    parser = argparse.ArgumentParser(description="Process some strings or files" )
    parser.add_argument(
        "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
    args = parser.parse_args()
    hash_input = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , "utf-8" )
    print(__magic_name__(hash_input ).final_hash() )
if __name__ == "__main__":
    # Was `main()`, which is not defined in this module — the CLI entry point
    # above is named `A`.  The doctest run is also moved under the guard so
    # importing the module has no side effects.
    A()
    import doctest
    doctest.testmod()
| 22 |
import math
def A(__a: int) -> bool:
    """Return True if ``__a`` is a perfect square.

    The original read an undefined name (``num``) and used
    ``math.sqrt(x) * math.sqrt(x) == x``, which is unreliable for large
    integers because of floating-point rounding; ``math.isqrt`` is exact.
    Raises ValueError for negative input (as ``math.sqrt`` did).
    """
    root = math.isqrt(__a)
    return root * root == __a
def A(__a: int) -> bool:
    """Return True if ``__a`` is a perfect square, via binary search.

    NOTE(review): the original bound every local to ``lowerCAmelCase_`` while
    reading ``left`` / ``right`` / ``mid`` / ``n`` (all undefined); the names
    are restored here.  Negative inputs yield False (search range is empty).
    """
    left = 0
    right = __a
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == __a:
            return True
        elif mid**2 > __a:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 22 | 1 |
'''simple docstring'''
def _UpperCamelCase(__A) -> list[int]:
    """Sieve of Eratosthenes: return all primes in ``[2, __A]``.

    NOTE(review): the original read the undefined name ``num`` and used the
    input number (not the prime ``p``) as the range *step* when striking out
    multiples; both defects are fixed here.

    Raises:
        ValueError: if ``__A`` is not a positive integer.
    """
    if __A <= 0:
        raise ValueError("Input must be a positive integer" )
    # is_prime[i] is True until i is proven composite.
    is_prime = [True] * (__A + 1)
    p = 2
    while p * p <= __A:
        if is_prime[p]:
            # Strike out multiples of p, starting at p*p (smaller multiples
            # were already removed by smaller primes).
            for multiple in range(p * p , __A + 1 , p ):
                is_prime[multiple] = False
        p += 1
    return [prime for prime in range(2 , __A + 1 ) if is_prime[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original called undefined names
    # (`prime_sieve_eratosthenes`, `user_num`) and evaluated a `Union[...]`
    # annotation without importing `typing`; the sieve in this module is
    # `_UpperCamelCase` and the prompt result is reused directly.
    a__ = int(input('Enter a positive integer: ').strip())
    print(_UpperCamelCase(a__))
| 80 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
    """Unit tests for the DeBERTa slow and fast tokenizers.

    NOTE(review): obfuscation collapsed identifiers in this class.  The three
    class attributes below all share one name (only the last assignment
    survives — presumably tokenizer_class / test_rust_tokenizer /
    rust_tokenizer_class originally), and every method is named
    ``lowerCamelCase`` so later definitions shadow earlier ones.  Several
    locals are bound to ``snake_case`` but read under their original names
    (e.g. ``tokd``, ``tokens``, ``tokenizer``).  Restore the original
    identifiers before running these tests.
    """
    # Three originally-distinct class attributes collapsed onto one name.
    __UpperCAmelCase : str = DebertaTokenizer
    __UpperCAmelCase : Optional[int] = True
    __UpperCAmelCase : int = DebertaTokenizerFast
    def lowerCamelCase ( self ) -> int:
        '''Write a tiny GPT2-style vocab.json / merges.txt fixture into tmpdir (setUp).'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        # NOTE(review): every local below is assigned to `snake_case`; the
        # writes rely on names (vocab_tokens, merges, special_tokens_map,
        # vocab/merges paths) that no longer exist.
        snake_case : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        snake_case : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        snake_case : Optional[Any] = {"unk_token": "[UNK]"}
        snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(UpperCamelCase__ ) )
    def lowerCamelCase ( self , **UpperCamelCase__ ) -> Any:
        '''Instantiate the tokenizer under test from the tmpdir fixture.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
    def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]:
        '''Return (input_text, expected_output_text) for round-trip checks.'''
        snake_case : Tuple = "lower newer"
        snake_case : Any = "lower newer"
        return input_text, output_text
    def lowerCamelCase ( self ) -> int:
        '''Tokenize "lower newer" and verify tokens and ids against the fixture vocab.'''
        snake_case : Optional[int] = self.get_tokenizer()
        snake_case : str = "lower newer"
        snake_case : Dict = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        snake_case : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        snake_case : List[Any] = tokens + [tokenizer.unk_token]
        snake_case : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
    def lowerCamelCase ( self ) -> List[str]:
        '''Check token_type_ids produced for a sentence pair.'''
        snake_case : Tuple = self.get_tokenizer()
        snake_case : Dict = tokenizer("Hello" , "World" )
        snake_case : str = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        # NOTE(review): `tokd` is never assigned — the tokenizer() result
        # above was presumably bound to it originally.
        self.assertListEqual(tokd["token_type_ids"] , UpperCamelCase__ )
    @slow
    def lowerCamelCase ( self ) -> str:
        '''Check build_inputs_with_special_tokens matches direct encoding (hub checkpoint).'''
        snake_case : List[Any] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
        snake_case : Dict = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase__ )
        snake_case : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase__ )
        snake_case : List[str] = tokenizer.encode(
            "sequence builders" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
        snake_case : Optional[int] = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
        snake_case : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        snake_case : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def lowerCamelCase ( self ) -> List[Any]:
        '''Integration test: batch-encode three sentences and compare against golden ids.'''
        snake_case : Union[str, Any] = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            snake_case : str = tokenizer_class.from_pretrained("microsoft/deberta-base" )
            snake_case : Union[str, Any] = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            snake_case : Optional[int] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ )
            snake_case : Optional[int] = [tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) for seq in encoding["input_ids"]]
            # fmt: off
            snake_case : Dict = {
                "input_ids": [
                    [1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            snake_case : int = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            self.assertDictEqual(encoding.data , UpperCamelCase__ )
            for expected, decoded in zip(UpperCamelCase__ , UpperCamelCase__ ):
                self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 203 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class a__ ( a_ ):
    """SpeechT5 processor bundling a feature extractor and a tokenizer.

    NOTE(review): every local in this class is bound to the single name
    ``_lowercase``, so the original distinction between ``inputs`` /
    ``targets`` / ``labels`` / ``decoder_attention_mask`` is lost: the code
    reads names (``targets['input_values']``, ``labels``, ``inputs``,
    ``feature_size_hack``) that are never assigned.  The last three methods
    also share the name ``_lowerCamelCase``, so only the final one survives.
    Compare against the upstream ``SpeechT5Processor`` to restore names.
    """

    _SCREAMING_SNAKE_CASE : Any = 'SpeechT5FeatureExtractor'
    _SCREAMING_SNAKE_CASE : int = 'SpeechT5Tokenizer'
    def __init__( self , _UpperCamelCase , _UpperCamelCase ):
        """Wire the feature extractor and tokenizer into the ProcessorMixin base.

        NOTE(review): both parameters share one name (duplicate argument —
        a syntax error); originally feature_extractor and tokenizer.
        """
        super().__init__(lowercase_ , lowercase_ )
    def __call__( self , *_UpperCamelCase , **_UpperCamelCase ):
        """Dispatch `audio`/`text` to inputs and `audio_target`/`text_target` to labels."""
        _lowercase : Any = kwargs.pop("audio" , lowercase_ )
        _lowercase : Tuple = kwargs.pop("text" , lowercase_ )
        _lowercase : Optional[int] = kwargs.pop("text_target" , lowercase_ )
        _lowercase : Tuple = kwargs.pop("audio_target" , lowercase_ )
        _lowercase : List[Any] = kwargs.pop("sampling_rate" , lowercase_ )
        # Exactly one of audio/text may drive the encoder inputs, and one of
        # audio_target/text_target the decoder targets.
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
        if audio is not None:
            _lowercase : Tuple = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
        elif text is not None:
            _lowercase : Any = self.tokenizer(lowercase_ , **lowercase_ )
        else:
            _lowercase : Dict = None
        if audio_target is not None:
            _lowercase : int = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
            _lowercase : Tuple = targets['''input_values''']
        elif text_target is not None:
            _lowercase : Union[str, Any] = self.tokenizer(lowercase_ , **lowercase_ )
            _lowercase : int = targets['''input_ids''']
        else:
            _lowercase : List[Any] = None
        # Merge target tensors into the inputs dict as `labels` (+ decoder mask).
        if inputs is None:
            return targets
        if targets is not None:
            _lowercase : List[Any] = labels
            _lowercase : List[Any] = targets.get("attention_mask" )
            if decoder_attention_mask is not None:
                _lowercase : Tuple = decoder_attention_mask
        return inputs
    def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
        """Pad `input_values`/`input_ids` and `labels` to a uniform batch (pad)."""
        _lowercase : int = kwargs.pop("input_values" , lowercase_ )
        _lowercase : Dict = kwargs.pop("input_ids" , lowercase_ )
        _lowercase : Dict = kwargs.pop("labels" , lowercase_ )
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
        if input_values is not None:
            _lowercase : List[str] = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
        elif input_ids is not None:
            _lowercase : Optional[Any] = self.tokenizer.pad(lowercase_ , **lowercase_ )
        else:
            _lowercase : Tuple = None
        if labels is not None:
            # Text labels go through the tokenizer; spectrogram labels through
            # the feature extractor (with a temporary feature_size override).
            if "input_ids" in labels or (isinstance(lowercase_ , lowercase_ ) and "input_ids" in labels[0]):
                _lowercase : Optional[int] = self.tokenizer.pad(lowercase_ , **lowercase_ )
                _lowercase : str = targets['''input_ids''']
            else:
                _lowercase : Any = self.feature_extractor.feature_size
                _lowercase : Optional[int] = self.feature_extractor.num_mel_bins
                _lowercase : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
                _lowercase : Any = feature_size_hack
                _lowercase : int = targets['''input_values''']
        else:
            _lowercase : List[Any] = None
        if inputs is None:
            return targets
        if targets is not None:
            _lowercase : int = labels
            _lowercase : List[Any] = targets.get("attention_mask" )
            if decoder_attention_mask is not None:
                _lowercase : Any = decoder_attention_mask
        return inputs
    def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
    def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )
| 358 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
# NOTE(review): all three docstring constants were bound to the single name
# `_snake_case`, while the decorator and the metric class below read
# `_CITATION` / `_DESCRIPTION` / `_KWARGS_DESCRIPTION`; the original names
# are restored here.
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    """``datasets`` Metric wrapping sklearn's ``mean_squared_error``.

    NOTE(review): the three methods were all named ``_lowerCamelCase`` (so
    later definitions shadowed earlier ones) and ``_compute`` repeated the
    parameter name ``_UpperCamelCase`` (a syntax error).  They are restored
    to the ``datasets.Metric`` hook names ``_info`` / ``_compute`` plus the
    helper ``_get_feature_types`` that ``_info`` itself calls.
    """

    def _info(self):
        """Describe the metric: features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ] , )

    def _get_feature_types(self):
        """Feature schema: sequences for the "multilist" config, scalars otherwise."""
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float" ) ),
                "references": datasets.Sequence(datasets.Value("float" ) ),
            }
        else:
            return {
                "predictions": datasets.Value("float" ),
                "references": datasets.Value("float" ),
            }

    def _compute(self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """Return {"mse": ...}; ``squared=False`` yields RMSE instead of MSE."""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 199 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the four module constants below are all bound to the single
# name `a`, so only the last assignment survives; the tokenizer class later
# references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined here.
a = logging.get_logger(__name__)
# Resource filenames expected by the tokenizer.
a = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
# Hub URLs for each pretrained checkpoint's vocab and merges files.
a = {
    '''vocab_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
    },
}
# Maximum positional-embedding length per checkpoint.
a = {
    '''facebook/bart-base''': 1_024,
    '''facebook/bart-large''': 1_024,
    '''facebook/bart-large-mnli''': 1_024,
    '''facebook/bart-large-cnn''': 1_024,
    '''facebook/bart-large-xsum''': 1_024,
    '''yjernite/bart_eli5''': 1_024,
}
@lru_cache()
def _snake_case() -> dict:
    """Return the GPT-2 byte→unicode-character map used by byte-level BPE.

    Printable bytes map to themselves; the remaining bytes map to characters
    starting at chr(256), so every byte gets a visible, reversible symbol.

    NOTE(review): the original bound every value to ``_A`` while reading the
    undefined names ``bs`` / ``cs`` / ``n``; the class below also calls this
    helper under the name ``bytes_to_unicode``, which is never defined.
    """
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def _snake_case ( _snake_case ) -> set:
    """Return the set of adjacent symbol pairs in the word ``_snake_case``.

    A "word" is any sequence of symbols (e.g. a tuple of BPE tokens or a
    string of characters); pairs drive the BPE merge loop.

    NOTE(review): the original bound locals to ``_A`` while reading the
    undefined names ``pairs`` / ``prev_char``; the class below also calls
    this helper under the name ``get_pairs``, which is never defined.
    """
    pairs = set()
    prev_char = _snake_case[0]
    for char in _snake_case[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class lowercase_ ( __lowerCAmelCase ):
    """GPT-2-style byte-level BPE tokenizer for BART.

    NOTE(review): obfuscation broke this class.  Every method is named
    ``lowerCAmelCase_`` (later definitions shadow earlier ones), several
    signatures repeat the parameter name ``_UpperCAmelCase`` (duplicate
    argument — a syntax error), and assignments were collapsed onto ``_A``,
    so attributes the code reads (``self.encoder``, ``self.decoder``,
    ``self.byte_encoder``, ``self.bpe_ranks``, ``self.cache``, ``self.pat``,
    ...) are never set.  Compare against the upstream ``BartTokenizer`` to
    restore identifiers before use.
    """
    # Four originally-distinct class attributes; all reference module
    # constants that the obfuscation also collapsed (see above).
    UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
    UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
    def __init__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict="replace" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Union[str, Any]="</s>" , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : Tuple="<pad>" , _UpperCAmelCase : Dict="<mask>" , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : Optional[Any] , ):
        """Load vocab.json / merges.txt and build encoder, BPE ranks and regex."""
        # Wrap each special token as an AddedToken (originally bos/eos/sep/
        # cls/unk/pad/mask — targets lost to obfuscation).
        _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
        _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
        _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
        _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
        _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
        _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
        super().__init__(
            errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
        with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
            _A = json.load(_UpperCAmelCase )
        _A = {v: k for k, v in self.encoder.items()}
        _A = errors  # how to handle errors in decoding
        _A = bytes_to_unicode()
        _A = {v: k for k, v in self.byte_encoder.items()}
        with open(_UpperCAmelCase , encoding='utf-8' ) as merges_handle:
            _A = merges_handle.read().split('\n' )[1:-1]
        _A = [tuple(merge.split() ) for merge in bpe_merges]
        _A = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
        _A = {}
        _A = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        _A = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    def lowerCAmelCase_ ( self : Dict ):
        """Vocabulary size (base vocab only, without added tokens)."""
        return len(self.encoder )
    def lowerCAmelCase_ ( self : Optional[int] ):
        """Return the full vocab dict, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[Any] ):
        """Apply byte-pair-encoding merges to one pre-tokenized token (cached)."""
        if token in self.cache:
            return self.cache[token]
        _A = tuple(_UpperCAmelCase )
        _A = get_pairs(_UpperCAmelCase )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            _A = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            _A , _A = bigram
            _A = []
            _A = 0
            while i < len(_UpperCAmelCase ):
                try:
                    _A = word.index(_UpperCAmelCase , _UpperCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    _A = j
                if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            _A = tuple(_UpperCAmelCase )
            _A = new_word
            if len(_UpperCAmelCase ) == 1:
                break
            else:
                _A = get_pairs(_UpperCAmelCase )
        _A = ' '.join(_UpperCAmelCase )
        _A = word
        return word
    def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[str] ):
        """Split text with the GPT-2 regex, byte-encode, then BPE each piece."""
        _A = []
        for token in re.findall(self.pat , _UpperCAmelCase ):
            _A = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(' ' ) )
        return bpe_tokens
    def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[str] ):
        """Map a token string to its id (unk id when missing)."""
        return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
    def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ):
        """Map an id back to its token string."""
        return self.decoder.get(_UpperCAmelCase )
    def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Dict ):
        """Join tokens and reverse the byte→unicode mapping back to text."""
        _A = ''.join(_UpperCAmelCase )
        _A = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(_UpperCAmelCase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        _A = os.path.join(
            _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '\n' )
        _A = 0
        with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            # Merges must be written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    _A = token_index
                writer.write(' '.join(_UpperCAmelCase ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
        """Add <s> ... </s> (and pair separators) around one or two sequences."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _A = [self.cls_token_id]
        _A = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
        """Return a 0/1 mask marking special-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(_UpperCAmelCase )) + [1]
        return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
    def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
        """BART uses all-zero token_type_ids for single sequences and pairs alike."""
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int=False , **_UpperCAmelCase : Optional[Any] ):
        """Optionally prepend a space so the first word BPE-merges like mid-sentence words."""
        _A = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
            _A = ' ' + text
        return (text, kwargs)
| 315 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a = TypeVar('''T''')
class lowercase_ ( Generic[T] ):
    """One node of a singly linked list, holding a value of type ``T``.

    NOTE(review): the original ``__init__`` bound the undefined name ``data``
    to the throwaway local ``_A``, so the attributes that ``__str__`` and the
    stack below read (``.data`` / ``.next``) were never created.
    """

    def __init__( self , _UpperCAmelCase : T ):
        self.data = _UpperCAmelCase  # payload value
        self.next = None  # next node in the chain; None at the tail

    def __str__( self ):
        return F'''{self.data}'''
class lowercase_ ( Generic[T] ):
    """LIFO stack implemented as a singly linked list of nodes.

    NOTE(review): in the original, every method was named ``lowerCAmelCase_``
    (so only the last survived) and every assignment was bound to ``_A`` (so
    ``self.top`` was never written).  Method names are restored from the
    calls the bodies themselves make (``self.is_empty()``).  ``push`` still
    references ``Node``, which is not defined in this module — the node class
    above was renamed to ``lowercase_`` by the same obfuscation and is itself
    shadowed by this class; confirm the intended node class name.
    """

    def __init__( self ):
        self.top = None  # head of the chain; None means the stack is empty

    def __iter__( self ):
        # Yield stored values from top to bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__( self ):
        return "->".join([str(item ) for item in self] )

    def __len__( self ):
        return len(tuple(iter(self ) ) )

    def is_empty( self ):
        return self.top is None

    def push( self , _UpperCAmelCase : T ):
        """Place a new value on top of the stack."""
        node = Node(_UpperCAmelCase )
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop( self ):
        """Remove and return the top value; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert self.top is not None
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek( self ):
        """Return the top value without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data

    def clear( self ):
        """Drop every element."""
        self.top = None
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 315 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
# Module-level logger scoped to the SpeechT5 conversion code path.
lowercase__ = logging.get_logger("transformers.models.speecht5")
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    """Copy generator weights from an original HiFi-GAN checkpoint into the HF model.

    NOTE(review): obfuscation corrupted this function — the three parameters
    all share the name ``UpperCAmelCase_`` (duplicate argument, a syntax
    error) while the body reads ``hf_model`` / ``checkpoint`` / ``config``,
    and each tensor pulled from ``checkpoint`` is bound to a throwaway local
    instead of being assigned onto the matching ``hf_model`` submodule.
    Restore names from the upstream conversion script before use.
    """
    hf_model.apply_weight_norm()
    # Input convolution weights (weight-norm g/v parts plus bias).
    UpperCAmelCase : Dict = checkpoint['input_conv.weight_g']
    UpperCAmelCase : Any = checkpoint['input_conv.weight_v']
    UpperCAmelCase : Any = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        UpperCAmelCase : Any = checkpoint[F"""upsamples.{i}.1.weight_g"""]
        UpperCAmelCase : Union[str, Any] = checkpoint[F"""upsamples.{i}.1.weight_v"""]
        UpperCAmelCase : str = checkpoint[F"""upsamples.{i}.1.bias"""]
    # Residual blocks: one entry per (upsample layer, kernel size) pair.
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            UpperCAmelCase : Any = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
            UpperCAmelCase : Optional[int] = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
            UpperCAmelCase : Dict = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
            UpperCAmelCase : Union[str, Any] = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
            UpperCAmelCase : int = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
            UpperCAmelCase : Optional[int] = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
    # Output convolution weights.
    UpperCAmelCase : Dict = checkpoint['output_conv.1.weight_g']
    UpperCAmelCase : str = checkpoint['output_conv.1.weight_v']
    UpperCAmelCase : Union[str, Any] = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , ):
    """Convert an original HiFi-GAN checkpoint into a HF SpeechTaHifiGan and save it.

    NOTE(review): same corruption as the loader above — duplicate
    ``UpperCAmelCase_`` parameters (syntax error), and results (config,
    model, loaded checkpoint, stats array, mean/scale tensors) are bound to
    throwaway locals instead of the names the later lines read
    (``config_path``, ``model``, ``orig_checkpoint``, ``stats``,
    ``repo_id``).  This definition also shadows the weight loader, which
    shares the name ``UpperCamelCase`` — the ``load_weights`` call below is
    therefore undefined.  Restore from the upstream conversion script.
    """
    if config_path is not None:
        UpperCAmelCase : Any = SpeechTaHifiGanConfig.from_pretrained(UpperCAmelCase_ )
    else:
        UpperCAmelCase : Optional[Any] = SpeechTaHifiGanConfig()
    UpperCAmelCase : List[Any] = SpeechTaHifiGan(UpperCAmelCase_ )
    UpperCAmelCase : int = torch.load(UpperCAmelCase_ )
    load_weights(orig_checkpoint['model']['generator'] , UpperCAmelCase_ , UpperCAmelCase_ )
    # Normalization statistics: stats[0] is the mean, stats[1] the scale.
    UpperCAmelCase : int = np.load(UpperCAmelCase_ )
    UpperCAmelCase : str = stats[0].reshape(-1 )
    UpperCAmelCase : List[str] = stats[1].reshape(-1 )
    UpperCAmelCase : Union[str, Any] = torch.from_numpy(UpperCAmelCase_ ).float()
    UpperCAmelCase : List[str] = torch.from_numpy(UpperCAmelCase_ ).float()
    model.save_pretrained(UpperCAmelCase_ )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(UpperCAmelCase_ )
if __name__ == "__main__":
    # NOTE(review): the parser was originally bound to `lowercase__`
    # (clobbering the module logger) while the code below read `parser` and
    # `args`, and the final call targeted the undefined name
    # `convert_hifigan_checkpoint`; the converter defined above is
    # `UpperCamelCase`.  Names restored accordingly.
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    UpperCamelCase(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 280 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_(DiffusionPipeline):
    """Unconditional latent-diffusion image-generation pipeline (VQ-VAE + UNet + DDIM).

    Fixes over the previous revision: the base class `_snake_case` was undefined
    (the imported ``DiffusionPipeline`` is used instead), and ``__call__`` declared
    every parameter as ``lowercase_`` (a SyntaxError) while the body read the real
    names ``batch_size``/``eta``/``output_type``/``return_dict``.
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate ``batch_size`` images by denoising random latents and decoding with the VQ-VAE."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # Scale the initial noise by the standard deviation required by the scheduler.
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # Prepare extra kwargs for the scheduler step, since not all schedulers accept `eta`.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # Predict the noise residual.
            noise_prediction = self.unet(latent_model_input, t).sample
            # Compute the previous noisy sample x_t -> x_t-1.
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # Decode the image latents with the VQ-VAE, then map from [-1, 1] to [0, 1].
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 280 | 1 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class lowercase(PretrainedConfig):
    """T5 model configuration.

    Fixes over the previous revision: the base `_UpperCAmelCase` was undefined
    (``PretrainedConfig`` is the imported base), the three class attributes all
    shared one mangled name, every ``__init__`` parameter was named ``lowercase``
    (a SyntaxError), and no value was ever stored on ``self`` even though
    ``self.feed_forward_proj`` was read further down.
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # `feed_forward_proj` is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )

        # For backwards compatibility.
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
class lowercase(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for T5.

    Fixes over the previous revision: the base `_UpperCAmelCase` was undefined,
    both properties shared the mangled name ``_snake_case`` (the second silently
    clobbered the first), and ``common_inputs`` was returned but never bound.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis mapping for the exported model's inputs."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With a cache the decoder sees one new token; the mask covers past + current.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required by the T5 export."""
        return 13
| 46 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Fix: these four module constants were all bound to the single mangled name
# `UpperCamelCase__`, while the tokenizer class below reads the real names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
    },
}

# Maximum sequence length each pretrained checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/bart-base''': 1_0_2_4,
    '''facebook/bart-large''': 1_0_2_4,
    '''facebook/bart-large-mnli''': 1_0_2_4,
    '''facebook/bart-large-cnn''': 1_0_2_4,
    '''facebook/bart-large-xsum''': 1_0_2_4,
    '''yjernite/bart_eli5''': 1_0_2_4,
}

# Backward-compatible alias: the last mangled binding pointed at this dict.
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
@lru_cache()
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character mapping used by byte-level BPE.

    The mapping is a bijection over all 256 byte values: printable bytes map to
    themselves, the rest are remapped to code points >= 256 so that no byte is
    lost and none maps to whitespace/control characters (which BPE chokes on).

    Fix over the previous revision: the working lists were bound to throwaway
    mangled names while the loop read `bs`/`cs`/`n`, so the function crashed.
    """
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Backward-compatible alias for the previous auto-generated name.
a__ = bytes_to_unicode
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    Fix over the previous revision: `pairs`/`prev_char` were read but never bound
    (assignments went to mangled throwaway names).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Backward-compatible alias for the previous auto-generated name.
a__ = get_pairs
class lowerCamelCase_(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style).

    Fixes over the previous revision: the base `__a` was undefined (the imported
    ``PreTrainedTokenizer`` is the real base), every instance attribute was bound
    to a mangled throwaway name so ``self.encoder``/``self.bpe_ranks``/... never
    existed, all public methods shared the name ``lowercase_`` (each silently
    clobbering the previous), duplicate parameter names made several signatures
    invalid, and ``save_vocabulary`` sorted with ``lambda _A: kv[1]`` (a NameError).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair-encoding merges to a single pre-tokenized token (memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its vocabulary id (unk id when missing)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and undo the byte-to-unicode mapping."""
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            # Bug fix: the sort key lambda previously referenced an undefined `kv`.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """Add BART special tokens: <s> A </s> (pair: <s> A </s></s> B </s>)."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True)

        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """BART does not use token type ids; return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the byte-level BPE treats the first word like any other."""
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 181 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """CLIP-style image processor (resize -> center-crop -> rescale -> normalize).

    Fixes over the previous revision: the base `lowerCAmelCase` was undefined
    (``BaseImageProcessor`` is the imported base), no configuration value was ever
    stored on ``self``, and every method was named ``_A`` while ``preprocess``
    called ``self.resize``/``self.center_crop``/``self.rescale``/``self.normalize``.
    """

    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize per channel: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one image or a batch; return a ``BatchFeature``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 360 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a deprecation warning at import time: this module now only re-exports the
# pipeline symbols from their new home, diffusers.pipelines.pipeline_utils.
deprecate(
    """pipelines_utils""",
    """0.22.0""",
    """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 319 | 0 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Compute per-process turn-around times under Highest-Response-Ratio-Next scheduling.

    Processes are first sorted by arrival time (mutates `arrival_time` in place);
    at each step the waiting process with the highest response ratio
    (burst + wait) / burst runs to completion.

    Fix over the previous revision: every local was bound to the single mangled
    name `lowerCAmelCase__`, so `current_time`, `finished_process`, etc. were
    read while unbound. The restored name also matches the __main__ call site.
    """
    current_time = 0
    # Number of processes finished.
    finished_process_count = 0
    # 1 marks a process that has already run to completion.
    finished_process = [0] * no_of_process
    # List to include calculation results.
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Advance the clock to the first unfinished arrival if the CPU is idle.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        # Index of the process selected to run next.
        loc = 0
        # Best response ratio seen so far.
        response_ratio = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Run the chosen process to completion and record its turn-around time.
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        finished_process[loc] = 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Return per-process waiting times: waiting = turn-around - burst.

    Fix over the previous revision: the result list was bound to a mangled
    throwaway name while `waiting_time` was read; the restored function name
    matches the __main__ call site. `process_name` is kept for signature
    compatibility with the original script even though it is unused.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


# Backward-compatible alias: the module-level mangled name previously ended up
# bound to this (last-defined) function.
_SCREAMING_SNAKE_CASE = calculate_waiting_time
if __name__ == "__main__":
    # Demo: five processes with staggered arrivals. The previous revision bound
    # everything to `_lowerCAmelCase` while reading the real variable names.
    no_of_process = 5
    process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
            F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
        )
    print(F"""average waiting time : {mean(waiting_time):.5f}""")
    print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 37 |
import math
class snake_case__:
    """All-pairs shortest paths via Floyd–Warshall on an adjacency matrix.

    Fixes over the previous revision: `self.n`/`self.w`/`self.dp` were never set
    (assignments went to the mangled name `__magic_name__`), and all three
    methods shared that same mangled name, so only the last survived. The
    restored method names match the __main__ call sites.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # Adjacency matrix for edge weights (inf = no edge).
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores the minimum known distance from i to j.
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax all pairs through every intermediate node k (O(n^3))."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the computed shortest distance from u to v."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Fix: the instance was bound to `__magic_name__` (as `Graph(5)`, an
    # undefined name) while the calls below used `graph`.
    graph = snake_case__(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 342 | 0 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
# Verbosity names accepted in the DATASETS_VERBOSITY environment variable.
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


# NOTE: this whole section previously bound every function to the single mangled
# name `lowerCamelCase__` and every module constant to `a_`, while bodies read
# the real names (`log_levels`, `_get_library_name`, `_tqdm_active`, ...). The
# original identifiers are restored so the helpers can call each other.


def _get_default_logging_level():
    """Return the default log level, honouring the DATASETS_VERBOSITY env var."""
    env_level_str = os.getenv('DATASETS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ', '.join(log_levels.keys()) }''')
    return _default_log_level


def _get_library_name() -> str:
    """Return the root package name of this module."""
    return __name__.split('.')[0]


def _get_library_root_logger() -> logging.Logger:
    """Return the logger shared by the whole library."""
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    """Apply the default verbosity to the library root logger."""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    """Reset the library root logger to NOTSET (inherit from ancestors)."""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger under the library namespace (the root one when `name` is None)."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the effective verbosity of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(logging.INFO)


def set_verbosity_warning():
    return set_verbosity(logging.WARNING)


def set_verbosity_debug():
    return set_verbosity(logging.DEBUG)


def set_verbosity_error():
    return set_verbosity(logging.ERROR)


def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers again."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like).
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy drop-in for `tqdm.tqdm` that does nothing (used when bars are disabled)."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return a no-op function for any tqdm attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


# Global switch controlling whether real tqdm progress bars are shown.
_tqdm_active = True


class _tqdm_cls:
    """Factory that yields a real tqdm bar or an EmptyTqdm depending on `_tqdm_active`."""

    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return True when tqdm progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Globally enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Globally disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
| 357 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import machinery. Fix: the structure dict and its entries were bound to
# the mangled name `a_` while `_import_structure` was read below, and the
# `_LazyModule` result was never installed into `sys.modules`.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ['MLukeTokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 327 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# Fix: both module constants were bound to the same scrambled name (the second
# clobbered the first), while the trainer class below reads `logger` and
# `arg_to_scheduler`.
logger = logging.get_logger(__name__)

# Maps the --lr_scheduler CLI choice to a transformers schedule factory.
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class __snake_case ( __lowerCamelCase ):
    '''simple docstring'''

    # Sequence-to-sequence Trainer specialization: label smoothing, custom
    # optimizer/scheduler creation, sortish sampling and generation-based
    # prediction on top of the base transformers Trainer.
    # NOTE(review): throughout this class, statements of the form
    # `__snake_case: T = value` are scrambled stand-ins for attribute/local
    # assignments (e.g. `self.config = ...`, `optimizer_kwargs = ...`) — the
    # names read later (self.config, loss, logits, no_decay, ...) are never
    # bound as written. Confirm against the upstream seq2seq_trainer.py.

    def __init__( self : str , A : int=None , A : List[str]=None , *A : str , **A : List[str] ):
        # NOTE(review): duplicate parameter names `A` cannot compile; the
        # upstream signature is (self, config=None, data_args=None, *args, **kwargs).
        super().__init__(*A , **A )
        if config is None:
            # Fall back to the model's own config when none is passed explicitly.
            assert isinstance(self.model , A ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f''' {self.model.__class__}'''
            )
            __snake_case: Any = self.model.config
        else:
            __snake_case: Union[str, Any] = config
        __snake_case: Any = data_args
        __snake_case: List[Any] = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            # Both label smoothing and pad-token masking require a valid pad id.
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding..""" )
        if self.args.label_smoothing == 0:
            # Plain cross-entropy that ignores pad positions.
            __snake_case: int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            __snake_case: List[Any] = label_smoothed_nll_loss

    def UpperCAmelCase__ ( self : Union[str, Any] , A : Tuple ):
        # Build optimizer (AdamW or Adafactor, with weight-decay grouping) and
        # LR scheduler unless the caller already supplied them.
        if self.optimizer is None:
            # Parameters matching these name fragments get no weight decay.
            __snake_case: Dict = ["""bias""", """LayerNorm.weight"""]
            __snake_case: List[str] = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            __snake_case: List[str] = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                __snake_case: int = Adafactor
                __snake_case: Optional[int] = {"""scale_parameter""": False, """relative_step""": False}
            else:
                __snake_case: str = AdamW
                # NOTE(review): `adam_betaa` twice looks like a scramble of
                # (adam_beta1, adam_beta2) — confirm.
                __snake_case: Optional[int] = {
                    """betas""": (self.args.adam_betaa, self.args.adam_betaa),
                    """eps""": self.args.adam_epsilon,
                }
            __snake_case: Optional[Any] = self.args.learning_rate
            if self.sharded_ddp:
                # Sharded DDP wraps the optimizer in fairscale's OSS.
                __snake_case: str = OSS(
                    params=A , optim=A , **A , )
            else:
                __snake_case: Tuple = optimizer_cls(A , **A )
        if self.lr_scheduler is None:
            __snake_case: List[Any] = self._get_lr_scheduler(A )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )

    def UpperCAmelCase__ ( self : Union[str, Any] , A : str ):
        # Resolve the schedule factory by CLI name; constant schedules take
        # fewer arguments than warmup-based ones.
        __snake_case: Dict = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            __snake_case: Optional[int] = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            __snake_case: List[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            __snake_case: Union[str, Any] = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
        return scheduler

    def UpperCAmelCase__ ( self : int ):
        # Pick the train sampler: none for iterable datasets, TPU sampler on
        # TPU, optional sortish sampling, else random/distributed.
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )

    def UpperCAmelCase__ ( self : Dict , A : Tuple , A : List[Any] , A : str ):
        # Core loss computation shared by training and evaluation; returns
        # (loss, logits). NOTE(review): duplicate `A` parameters again —
        # upstream is (self, model, inputs, labels).
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                __snake_case: Dict = model(**A , use_cache=A )[0]
                __snake_case: Optional[int] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                __snake_case , __snake_case: str = model(**A , labels=A , use_cache=A )[:2]
        else:
            # compute label smoothed loss
            __snake_case: int = model(**A , use_cache=A )[0]
            __snake_case: int = torch.nn.functional.log_softmax(A , dim=-1 )
            __snake_case , __snake_case: List[Any] = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits

    def UpperCAmelCase__ ( self : Tuple , A : List[str] , A : Union[str, Any] ):
        # Trainer hook: pop labels and delegate to _compute_loss.
        __snake_case: Any = inputs.pop("""labels""" )
        __snake_case , __snake_case: Tuple = self._compute_loss(A , A , A )
        return loss

    def UpperCAmelCase__ ( self : int , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] , A : List[Any] = None , ):
        # Evaluation step: optionally generate tokens (padded to max_length),
        # always compute the loss; returns (loss, logits, labels).
        __snake_case: Tuple = self._prepare_inputs(A )
        __snake_case: str = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            __snake_case: Tuple = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **A , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                __snake_case: int = self._pad_tensors_to_max_len(A , gen_kwargs["""max_length"""] )
        __snake_case: Any = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            __snake_case , __snake_case: List[Any] = self._compute_loss(A , A , A )
        __snake_case: Optional[Any] = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        __snake_case: Optional[int] = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            __snake_case: str = self._pad_tensors_to_max_len(A , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)

    def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[Any] , A : int ):
        # Right-pad a token tensor to max_length with the pad (or EOS) id.
        # If PAD token is not defined at least EOS token has to be defined
        __snake_case: str = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f''' padded to `max_length`={max_length}''' )
        __snake_case: Any = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        # NOTE(review): the line below should copy `tensor` into the left
        # columns of the padded tensor (`padded_tensor[:, : tensor.shape[-1]] = tensor`
        # upstream) — as written it is a dead rebind. Confirm.
        __snake_case: Any = tensor
        return padded_tensor
| 111 |
import comet # From: unbabel-comet
import torch
import datasets
# Fix: all four module constants were bound to the same scrambled name
# `UpperCAmelCase__` (each assignment clobbered the previous one) while the
# metric class below reads `_DESCRIPTION`, `_CITATION` and `_KWARGS_DESCRIPTION`.
logger = datasets.logging.get_logger(__name__)

# BibTeX citation rendered into the metric card.
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
    author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
    title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
    booktitle = {Proceedings of the Fifth Conference on Machine Translation},
    month = {November},
    year = {2020},
    address = {Online},
    publisher = {Association for Computational Linguistics},
    pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo  and
      Stewart, Craig  and
      Farinha, Ana C  and
      Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
'''

# Human-readable description of the metric.
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

# Usage documentation shown by `datasets` for this metric.
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
    >>> comet_metric = datasets.load_metric(\'comet\')
    >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    """COMET machine-translation quality metric (wraps the `unbabel-comet` package)."""

    # Fix: the three methods were all named `__A`, so only the last survived
    # and none implemented the datasets.Metric hook names
    # (_info / _download_and_prepare / _compute).

    def _info(self):
        # Declares the metric card and the expected string-sequence inputs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # Load the COMET checkpoint once per metric instance.
        # Fix: the checkpoint must be stored on `self.scorer` (read by _compute);
        # the original bound it to a throwaway local.
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        # Score each (src, mt, ref) triple; default to GPU only when available.
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=DummyObject ):
    """Import-error placeholder raised when torch/transformers/onnx are missing.

    Fixes: the metaclass referenced an undefined name (`DummyObject` is what the
    import above provides), and the backend list must live in `_backends`, which
    the DummyObject metaclass reads. NOTE(review): the six placeholder classes in
    this file share one scrambled name; the originals had distinct names.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __snake_case ( metaclass=DummyObject ):
    """Import-error placeholder raised when torch/transformers/onnx are missing.

    Fixes: undefined metaclass name replaced with the imported `DummyObject`,
    and the backend list stored in `_backends` as that metaclass requires.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __snake_case ( metaclass=DummyObject ):
    """Import-error placeholder raised when torch/transformers/onnx are missing.

    Fixes: undefined metaclass name replaced with the imported `DummyObject`,
    and the backend list stored in `_backends` as that metaclass requires.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __snake_case ( metaclass=DummyObject ):
    """Import-error placeholder raised when torch/transformers/onnx are missing.

    Fixes: undefined metaclass name replaced with the imported `DummyObject`,
    and the backend list stored in `_backends` as that metaclass requires.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __snake_case ( metaclass=DummyObject ):
    """Import-error placeholder raised when torch/transformers/onnx are missing.

    Fixes: undefined metaclass name replaced with the imported `DummyObject`,
    and the backend list stored in `_backends` as that metaclass requires.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __snake_case ( metaclass=DummyObject ):
    """Import-error placeholder raised when torch/transformers/onnx are missing.

    Fixes: undefined metaclass name replaced with the imported `DummyObject`,
    and the backend list stored in `_backends` as that metaclass requires.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 285 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Fix: both constants were bound to the same scrambled name, so the archive map
# clobbered the logger. Restored the conventional names.
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __snake_case ( PretrainedConfig ):
    """Configuration for a MobileNetV1 model.

    Fixes: the base class referenced an undefined name (`PretrainedConfig` is
    imported above); `__init__` declared duplicate `snake_case` parameters
    (a SyntaxError) and bound every value to a local instead of `self`.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class __snake_case ( OnnxConfig ):
    """ONNX export configuration for MobileNetV1.

    Fixes: the base class referenced an undefined name (`OnnxConfig` is
    imported above), and all three properties shared one scrambled name so
    only the last survived — the OnnxConfig hooks are `inputs`, `outputs`
    and `atol_for_validation`. NOTE(review): this file reuses the scrambled
    class name of the config class above; the originals were distinct.
    """

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel input with a dynamic batch axis.
        return OrderedDict([("""pixel_values""", {0: """batch"""})] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})] )
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 285 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both constants were bound to scrambled throwaway names; restored the
# conventional `logger` / archive-map names.
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowercase__ ( PretrainedConfig ):
    """Configuration for a SEW-D model.

    Fixes: the base class referenced an undefined name (`PretrainedConfig` is
    imported above); `__init__` declared ~40 parameters all named
    `__snake_case` (a SyntaxError), and every attribute was bound to a
    scrambled local instead of `self` even though the class reads
    `self.conv_dim`, `self.conv_stride`, etc. Parameter names/defaults follow
    the upstream SEWDConfig.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Feature-extractor (convolutional front-end) settings.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # Transformer encoder settings (DeBERTa-style relative attention).
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Total stride of the convolutional front-end (input frames per logit).
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 200 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCamelCase :
    """Graph vertex for Prim's MST algorithms below.

    Fixes: `__init__` bound its values to a throwaway local (so `self.id`,
    `self.key`, `self.pi`, `self.neighbors` and `self.edges` — all read by the
    algorithms below — were never set), `__lt__` referenced an undefined
    `other`, and both mutators shared one scrambled name while callers use
    `add_neighbor` / `add_edge`.
    """

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None  # current best edge weight into this vertex
        self.pi = None  # predecessor vertex in the MST
        self.neighbors = []
        self.edges = {}  # {neighbor vertex id: edge weight}

    def __lt__(self, other):
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def _SCREAMING_SNAKE_CASE(graph: list, a: int, b: int, edge: int) -> None:
    """Connect 1-indexed vertices `a` and `b` with an undirected edge of the
    given weight.

    Fix: the original declared four parameters all named `_lowercase`
    (a SyntaxError) while the body referenced `graph`, `a` and `b`.
    """
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def _SCREAMING_SNAKE_CASE(graph: list, root) -> list:
    """Prim's MST, O(V^2) list-scan variant.

    Returns the MST as a list of 1-indexed (vertex, predecessor) pairs for
    every vertex except the root. Fixes: duplicate `_lowercase` parameters
    (a SyntaxError) and key/pi/queue values bound to a throwaway local so the
    algorithm state was never actually updated.
    """
    a = []
    # Initialize every vertex as unreached.
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # Extract the cheapest frontier vertex (uses Vertex.__lt__ on key).
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def _SCREAMING_SNAKE_CASE(graph: list, root) -> Iterator[tuple]:
    """Prim's MST, binary-heap variant; yields 1-indexed (vertex, predecessor)
    pairs for every vertex except the root.

    Fixes: duplicate `_lowercase` parameters (a SyntaxError) and key/pi/heap
    state bound to throwaway locals, so the heap and MST were never built.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # Re-heapify because a key just changed inside the heap.
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def _SCREAMING_SNAKE_CASE ( ) ->None:
    '''Placeholder test hook; intentionally empty (doctest discovery below
    exercises the module).'''


if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 105 | 0 |
"""simple docstring"""
from math import ceil, sqrt
def lowercase(limit: int = 1000000) -> int:
    """Project Euler 173: count hollow square laminae buildable with up to
    `limit` unit tiles.

    Fixes: the accumulator and the hole-width lower bound were bound to a
    scrambled throwaway name while the body read `answer` and
    `hole_width_lower_bound` (NameError), and the parameter name did not match
    the `limit` the body uses.
    """
    answer = 0
    # Outer side length: a lamina needs at least outer_width - 2 >= 1 tiles of
    # border; 4*(outer_width - 1) tiles is the thinnest frame.
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Smallest hole that keeps the tile count within the limit.
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Hole and outer square must share parity for a symmetric lamina.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
    # Fix: the guard called an undefined `solution()`; the function in this
    # (scrambled) module is named `lowercase`.
    print(f"{lowercase() = }")
| 54 | """simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase_ :
    # Model tester driving the Flax Pegasus tests: builds tiny configs/inputs
    # and checks that cached (incremental) decoding matches full decoding.
    # NOTE(review): the three class attributes below all share one scrambled
    # name, so only the last survives; upstream they are `config_cls`,
    # `config_updates` and `hidden_act` — the methods read `self.config_cls`
    # and `self.config_updates`. Confirm against the upstream test file.
    snake_case__ = PegasusConfig
    snake_case__ = {}
    snake_case__ = '''gelu'''

    def __init__( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[str]=13 , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : int=False , __UpperCamelCase : Optional[Any]=99 , __UpperCamelCase : int=32 , __UpperCamelCase : List[str]=5 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : List[str]=20 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Dict=0 , ) -> str:
        # NOTE(review): duplicate `__UpperCamelCase` parameter names cannot
        # compile, and the `_UpperCamelCase = value` lines below are scrambled
        # stand-ins for `self.<name> = value` (self.batch_size etc. are read by
        # the other methods) — confirm before relying on this code.
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = seq_length
        _UpperCamelCase = is_training
        _UpperCamelCase = use_labels
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = eos_token_id
        _UpperCamelCase = pad_token_id
        _UpperCamelCase = bos_token_id

    def _UpperCamelCase ( self : Tuple ) -> List[str]:
        # Build a tiny random (config, inputs_dict) pair; input ids are clipped
        # above the special tokens and terminated with EOS.
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        _UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        _UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _UpperCamelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        _UpperCamelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        return config, inputs_dict

    def _UpperCamelCase ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] ) -> str:
        # Verify incremental decoding with a key/value cache produces the same
        # logits as a single full decode (no explicit attention mask variant).
        _UpperCamelCase = 20
        _UpperCamelCase = model_class_name(__UpperCamelCase )
        _UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
        _UpperCamelCase , _UpperCamelCase = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        _UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
        _UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        _UpperCamelCase = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        # Decode all but the last token with the cache...
        _UpperCamelCase = model.decode(
            decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
        _UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        # ...then feed the final token using the populated cache.
        _UpperCamelCase = model.decode(
            decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
        _UpperCamelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
        # Cached and uncached last-position logits must agree closely.
        _UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )

    def _UpperCamelCase ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ) -> List[str]:
        # Same cache-consistency check, but with an explicit decoder attention
        # mask padded out to the cache length.
        _UpperCamelCase = 20
        _UpperCamelCase = model_class_name(__UpperCamelCase )
        _UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
        _UpperCamelCase , _UpperCamelCase = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        _UpperCamelCase = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        _UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
        _UpperCamelCase = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        _UpperCamelCase = model.decode(
            decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
        _UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        _UpperCamelCase = model.decode(
            decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
        _UpperCamelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
        _UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowercase(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    """Build the kwargs dict for a (Flax) Pegasus forward pass.

    Missing masks are derived from the pad token: encoder mask is 1 where
    `input_ids` is not padding; decoder mask always attends to the first
    position, then masks padding. Fixes: the original declared duplicate
    `a__` parameters (a SyntaxError) while the body referenced the real
    names, and `np.inta` is a scramble of `np.int8`.
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                # First decoder position (decoder_start_token) is always visible.
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class UpperCAmelCase_ ( _lowercase , unittest.TestCase):
    """Model-level tests for the Flax Pegasus models.

    Attribute and method names are restored: previously every class
    attribute was bound to a single mangled name (so the mixin could
    never see ``all_model_classes``), every method shared one name (so
    only the last definition survived and unittest discovered nothing),
    and the inner jitted helpers reused one parameter name, which is a
    SyntaxError.
    """

    # classes exercised by the common mixin tests
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # stored on self so the test methods below can reach them
        self.model_tester = FlaxPegasusModelTester(self)
        # NOTE(review): config_class restored per the upstream Pegasus test
        # suite — confirm PegasusConfig is imported at the top of this file.
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        # jitted and non-jitted encoding must agree in structure and shapes
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        # jitted and non-jitted decoding must agree in structure and shapes
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        # smoke-test loading PyTorch weights into the Flax models
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        # end-to-end summarization check against reference outputs
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
            ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
        ]

        tgt_text = [
            '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
            '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 54 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# Relative frequency (percent) of each letter in typical English text,
# taken from https://en.wikipedia.org/wiki/Letter_frequency.
# NOTE: every module constant and function below had been bound to a single
# reused name, so LETTERS/ETAOIN/get_letter_count never actually existed at
# the names the code referenced; the original identifiers are restored.
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
# English letters ordered from most to least frequent.
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message):
    """Return a dict mapping each uppercase letter to its count in *message*."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x):
    """Sort-key helper: return the first element of a sequence."""
    return x[0]


def get_frequency_order(message):
    """Return the 26 letters ordered from most to least frequent in *message*.

    Ties are made deterministic: within one frequency bucket, letters are
    sorted by reverse ETAOIN order before being joined.

    >>> get_frequency_order("Hello World")
    'LOWDRHEZQXJKVBPYGFMUCSNIAT'
    """
    letter_to_freq = get_letter_count(message)
    # bucket the letters by their count in the message
    freq_to_letter = {freq: [] for freq in letter_to_freq.values()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    # deterministic tie-break inside each bucket, then collapse to a string
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)  # highest count first
    return "".join(freq_pair[1] for freq_pair in freq_pairs)


def english_freq_match_score(message):
    """Score 0-12: how closely *message*'s letter frequencies match English.

    One point for each of English's six most common letters found among the
    message's six most common, plus one point for each of the six least
    common found among the message's six least common.

    >>> english_freq_match_score("Hello World")
    1
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 54 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main():
    """Parse raw DPR biencoder data into an evaluation set and a gold file.

    Reads the JSON at ``--src_path`` and, for every record, writes the
    question (one per line) to ``--evaluation_set`` and the tab-joined
    titles of its positive contexts to ``--gold_data_path``.

    The function name is restored to ``main`` (the ``__main__`` guard calls
    it) and the undefined argument-type alias is fixed to ``str``.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 70 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table: submodule name -> public names it defines.
# Restored: the table, the torch-only extension, and the final module
# replacement all previously wrote to one reused variable, so the
# ``_import_structure`` referenced at the bottom never existed and the lazy
# module was never installed.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is installed, so the modeling classes can be exposed as well
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy torch imports only
    # happen when an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (0.0 .. 1.0).

    https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance

    The name and parameter names are restored: the scrambled version gave
    both parameters the same identifier (a SyntaxError) and the
    ``__main__`` guard below calls ``jaro_winkler``, which did not exist.

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # characters of _str1 that also occur in _str2 within the match window
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition: matched characters appearing in a different order
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # Winkler bonus for a common prefix of up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 131 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import table: submodule name -> public names it defines.
# Restored: the table and the final module replacement previously wrote to
# one reused variable, so ``_import_structure`` referenced at the bottom
# never existed and the lazy module was never installed.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is installed, so the modeling classes can be exposed as well
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 244 |
import torch
from diffusers import StableDiffusionPipeline
# Checkpoint directory produced by DreamBooth fine-tuning.
# Variable names are restored (every assignment previously rebound the same
# identifier, so ``model_id``/``prompt``/``image`` were undefined) and the
# nonexistent ``torch.floataa`` dtype is fixed to ``torch.float16``.
model_id = "path-to-your-trained-model"
# fp16 halves memory use; .to("cuda") requires a CUDA device.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

# "sks" is the rare-token identifier the model was fine-tuned on.
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 244 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored names: the logger and the archive map were both bound to the same
# reused identifier, so the logger was immediately clobbered.
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL.
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase__(PretrainedConfig):
    """FNet model configuration.

    Instantiating with the defaults yields a configuration comparable to
    google/fnet-base.

    Fixes: the base class was referenced through an undefined alias (it is
    ``PretrainedConfig``, imported at the top of this file); ``model_type``
    had lost its attribute name; the ``__init__`` signature reused one name
    for all parameters (a SyntaxError); and the values were bound to locals
    instead of ``self`` attributes.
    """

    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        # special token ids are handled by the base class
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # TPU-specific Fourier-transform optimizations (see upstream FNet docs)
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 371 | """simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board,
    diagonal_right_collisions,
    diagonal_left_collisions,
    boards,
    n,
):
    """Recursively place one queen per row, collecting complete boards.

    Names are restored: both functions previously shared one identifier and
    every parameter of this one reused a single name (a SyntaxError), so the
    recursive call and the entry point below could never resolve.

    Args:
        possible_board: column index of the queen in each filled row so far.
        diagonal_right_collisions: occupied ``row - col`` diagonals.
        diagonal_left_collisions: occupied ``row + col`` diagonals.
        boards: output list of rendered solution boards.
        n: board size.
    """
    row = len(possible_board)
    # If row equals the board size there is a queen in every row:
    # render [1, 3, 0, 2] as ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    if row == n:
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    for col in range(n):
        # A candidate column collides if another queen already uses it
        # (vertical), or shares a 45° diagonal (row - col) or a 135°
        # diagonal (row + col) with an earlier queen.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # place the queen and recurse into the next row
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n):
    """Solve the n-queens problem and print every solution board."""
    boards = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 154 | 0 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
# Native torch AMP (torch.cuda.amp.autocast) is available from torch 1.6 on.
# Names restored: the flag and the logger were bound to one reused
# identifier, so ``logger`` (used by configure_logger below) never existed.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we pre-train.

    Class and field names are restored: the dataclass previously bound every
    field to the same reused name, so ``main`` could not read
    ``model_args.verbose_logging`` etc., and the defaults referenced an
    undefined alias.
    """

    # path or hub id of the base wav2vec2 checkpoint
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # NOTE(review): default restored as True per the upstream pretraining
    # script — confirm against the project history.
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: "ModelArguments", training_args: "TrainingArguments"):
    """Configure the root log format and pick this module's log level.

    DEBUG when ``--verbose_logging`` is set, INFO on the main process,
    WARNING on all other (distributed) ranks.

    Restored: the name (``main`` calls ``configure_logger``) and the two
    parameter names, which previously collided (a SyntaxError).
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we pre-train on.

    Class and field names are restored from the help texts and from how
    ``main`` reads them (``data_args.dataset_name`` etc.); the dataclass
    previously bound every field to the same reused name.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Collator that pads raw speech inputs and samples the masked time
    indices needed for wav2vec2 pre-training.

    Names are restored: the fields previously shared one identifier and
    ``__call__``'s body referenced an undefined alias instead of its
    parameter. The two indexed-assignment lines were collapsed to bare
    rebinds by the scrambling and are reconstructed per the upstream
    pretraining collator — confirm against the project history.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    # NOTE(review): relative order of the two Optional[int] fields could not
    # be recovered from the scrambled source; both are only ever passed by
    # keyword in this file.
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        attention_mask = None  # explicit init so the call below never sees an unbound name
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer subclass for wav2vec2 pre-training that anneals the gumbel
    softmax temperature by one decay step per training step.

    Restored: the class name (``main`` constructs ``WavaVecaPreTrainer``),
    the base class (``Trainer``, imported at the top of this file), the
    ``__init__`` parameter names (they previously collided, a SyntaxError)
    and the ``training_step`` hook name required by the Trainer API.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Run one optimization step and return the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            # under DataParallel/deepspeed the config lives on model.module
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    """Entry point: load and preprocess the speech dataset, build the
    wav2vec2 model and run pre-training.

    Restored: the function name (the ``__main__`` guard calls ``main``),
    the variable names, and the ``DatasetDict`` subscript assignments that
    the scrambling collapsed into bare rebinds.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain:
        # carve the validation split out of the head of the train split
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 332 |
'''simple docstring'''
import os
# Value of each Roman symbol. Name restored: the constant was bound to a
# reused identifier, so the SYMBOLS the functions reference never existed.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer.

    A symbol smaller than its successor is subtractive (IV, IX, XL, ...).

    >>> parse_roman_numerals("XIV")
    14
    >>> parse_roman_numerals("MMMCLXVI")
    3166
    """
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # the last symbol is always additive
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer (1..3999) to its minimal Roman numeral form.

    >>> generate_roman_numerals(1990)
    'MCMXC'
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by minimising every Roman numeral
    in the data file (one numeral per line, located next to this module).
    """
    savings = 0

    # the data file lives alongside this script, not under the CWD
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

        for line in lines:
            original = line.strip()
            num = parse_roman_numerals(original)
            shorter = generate_roman_numerals(num)
            savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
    """Config tester for CvT: verifies the stage-wise list attributes exist
    on a freshly built config."""

    def UpperCamelCase__( self ):
        '''simple docstring'''
        # Build a config from the tester's kwargs and probe its attributes.
        # Fix: the assertions previously referenced an undefined name
        # instead of the config object just constructed.
        __A : int = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(__A , '''embed_dim''' ) )
        self.parent.assertTrue(hasattr(__A , '''num_heads''' ) )
class TFCvtModelTester:
    """Builds tiny Cvt configurations and random inputs, and runs shape checks
    against the TF Cvt models.

    ``parent`` is the ``unittest.TestCase`` used for assertions; all other
    arguments are small model hyper-parameters so the tests run quickly.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # BUG FIX: the previous signature repeated one (name-mangled) parameter
        # name for every argument -- a SyntaxError -- and the body bound each
        # value to a throwaway local instead of storing it on ``self``.
        # NOTE: the list defaults are only ever read, never mutated, so the
        # usual mutable-default pitfall does not apply in this test helper.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` with random inputs
        (labels only when ``use_labels`` is set)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ``CvtConfig`` from the tester's hyper-parameters."""
        # NOTE(review): ``CvtConfig`` must be imported at the top of this file
        # (outside this excerpt) -- confirm the import exists.
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare model and check the shape of ``last_hidden_state``."""
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        # BUG FIX: both unpacked values were previously bound to the same local.
        height, width = image_size[0], image_size[1]
        # Replay each stage's spatial down-sampling (conv arithmetic with the
        # stage's patch size / stride / padding) to get the expected output size.
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification model and check the logits shape."""
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape expected by the common test mixin."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Common + pipeline test-suite wiring for the TF Cvt models.

    NOTE(review): this class is heavily damaged by automatic renaming and will
    not run as-is:
    - every method is named ``UpperCamelCase__`` and every class attribute
      ``_lowerCamelCase``, so later definitions shadow earlier ones and only the
      last of each survives; unittest also cannot discover non-``test_*`` names.
      The original names (``all_model_classes``, ``pipeline_model_mapping``,
      ``test_config``, ``test_model``, ...) need to be restored -- confirm
      against the upstream transformers test file.
    - several bodies reference the undefined, name-mangled identifier
      ``__lowerCamelCase`` (flagged inline below).
    """

    # Models exercised by the common mixin (empty when TF is unavailable).
    _lowerCamelCase = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    # Pipeline task -> model mapping consumed by the pipeline test mixin.
    _lowerCamelCase = (
        {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    # Feature toggles read by the common test mixin.
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False

    def UpperCamelCase__( self ):
        """Set up the model tester and config tester used by the suite."""
        __A : str = TFCvtModelTester(self )
        # NOTE(review): ``__lowerCamelCase`` below is undefined; upstream passes
        # ``config_class=CvtConfig, has_text_modality=False`` -- confirm.
        __A : Union[str, Any] = TFCvtConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )

    def UpperCamelCase__( self ):
        """Run the full battery of config serialization/round-trip checks."""
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='''Cvt does not output attentions''' )
    def UpperCamelCase__( self ):
        """Skipped: Cvt has no attention outputs."""
        pass

    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def UpperCamelCase__( self ):
        """Skipped: Cvt consumes pixel values only."""
        pass

    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def UpperCamelCase__( self ):
        """Skipped: no token embedding tables to test."""
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
    def UpperCamelCase__( self ):
        """Dataset-conversion check; GPU-only because of grouped convolutions."""
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
    @slow
    def UpperCamelCase__( self ):
        """keras ``fit`` smoke test; GPU-only for the same reason as above."""
        super().test_keras_fit()

    @unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
    def UpperCamelCase__( self ):
        """Run keras fit under mixed float16, then restore the float32 policy."""
        __A : Dict = tf.keras.mixed_precision.Policy('''mixed_float16''' )
        tf.keras.mixed_precision.set_global_policy(__lowerCamelCase )
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('''float32''' )

    def UpperCamelCase__( self ):
        """Check that ``model.call`` takes ``pixel_values`` as its first argument."""
        # NOTE(review): the ``__lowerCamelCase`` references below are undefined;
        # upstream instantiates ``model_class(config)`` and compares against the
        # collected ``arg_names`` / expected list -- confirm.
        __A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A : Tuple = model_class(__lowerCamelCase )
            __A : int = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __A : Any = [*signature.parameters.keys()]
            __A : int = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowerCamelCase )

    def UpperCamelCase__( self ):
        """Check the number and shape of hidden states (one per Cvt stage)."""

        # NOTE(review): the inner helper repeats one name-mangled parameter name
        # three times -- a SyntaxError; upstream takes
        # ``(config, inputs_dict, model_class)`` -- confirm.
        def check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
            __A : Dict = model_class(__lowerCamelCase )
            __A : Any = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
            __A : str = outputs.hidden_states
            __A : List[str] = len(self.model_tester.depth )
            self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        __A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A : Optional[Any] = True
            check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __A : Dict = True
            check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

    def UpperCamelCase__( self ):
        """Shape-check the bare model via the model tester."""
        __A : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCamelCase )

    def UpperCamelCase__( self ):
        """Shape-check the image-classification head via the model tester."""
        __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )

    @slow
    def UpperCamelCase__( self ):
        """Smoke-test loading the published checkpoint(s)."""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A : Optional[Any] = TFCvtModel.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
def prepare_img() -> "Image.Image":
    """Load the COCO cats fixture image used by the integration tests.

    BUG FIX: the previous version bound the opened image to a throwaway local
    and then returned the undefined name ``image``; it was also named
    ``__lowercase`` while its call site uses ``prepare_img()``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
    """Integration test: run the published TF Cvt classifier on a real image.

    NOTE(review): both methods share the auto-generated name ``UpperCamelCase__``
    (the second shadows the first), and the test body reads
    ``self.default_image_processor`` while the cached property below has the
    wrong name -- restore the upstream names for this to run.
    """

    @cached_property
    def UpperCamelCase__( self ):
        """Image processor matching the published checkpoint."""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def UpperCamelCase__( self ):
        """Forward the fixture image and compare the first logits to reference values."""
        # NOTE(review): the ``__lowerCamelCase`` references below are undefined
        # name-mangled identifiers; upstream passes the prepared image/inputs
        # and compares ``outputs.logits`` -- confirm against upstream.
        __A : List[str] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        __A : List[str] = self.default_image_processor
        __A : Union[str, Any] = prepare_img()
        __A : List[Any] = image_processor(images=__lowerCamelCase , return_tensors='''tf''' )
        # forward pass
        __A : Any = model(**__lowerCamelCase )
        # verify the logits
        __A : Dict = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , __lowerCamelCase )
        __A : Dict = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCamelCase , atol=1e-4 ) )
| 291 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __snake_case ( Pipeline ):
    """Zero-shot image classification pipeline.

    Scores an image against a set of candidate text labels with a CLIP-style
    model: each label is rendered through ``hypothesis_template`` and the
    image/text similarity logits are softmaxed into per-label scores.

    BUG FIXES relative to the previous version: the base class and decorator
    argument were undefined placeholders (``Pipeline`` / ``PIPELINE_INIT_ARGS``
    are imported above); ``__call__`` and ``preprocess`` repeated one parameter
    name (a SyntaxError); dictionary entries were bound to throwaway locals;
    and the postprocess sort lambda referenced an undefined name. The hook
    methods are named per the ``Pipeline`` base-class contract
    (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` / ``postprocess``).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        # Restrict to model classes that emit image/text similarity logits.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images, **kwargs):
        """Classify ``images`` against ``candidate_labels`` (passed via kwargs)."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """Split pipeline kwargs into (preprocess, forward, postprocess) dicts."""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        """Encode the image and tokenize one hypothesis per candidate label."""
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        # Wrapped in a list so the batching machinery does not split the encoding.
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """Run the model and keep the per-image logits plus the label list."""
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Softmax the logits and return ``{"score", "label"}`` dicts, best first."""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            # A single label squeezes down to a scalar; re-wrap it.
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 291 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch) -> None:
    """Reset the set of already-emitted deprecation warnings so each test sees them afresh.

    BUG FIX: the parameter was previously named ``__lowercase`` while the body
    used ``monkeypatch``; pytest also injects the builtin fixture only when the
    parameter is literally named ``monkeypatch``. Renamed to the descriptive
    fixture name its consumer requests.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def mock_hfh(monkeypatch) -> None:
    """Replace ``datasets.inspect.huggingface_hub`` with a stub listing a few metric ids.

    BUG FIX: the previous version named the parameter ``__lowercase`` while the
    body used ``monkeypatch`` (and pytest injects the fixture by parameter
    name); the mock's ``__init__`` read an undefined name and stored nothing on
    ``self``; and the listing method had an arbitrary name instead of the
    ``list_metrics`` API that ``datasets.inspect`` calls on the hub client.
    """

    class MetricMock:
        # Minimal stand-in for a hub metric entry: only the ``id`` is read.
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path) -> None:
    """Each deprecated metric entry point should emit a FutureWarning pointing to ``evaluate``.

    BUG FIX: the previous signature repeated one parameter name five times (a
    SyntaxError) and passed an undefined name to ``pytest.warns``; the function
    also lacked the ``test_`` prefix pytest needs for collection. The fixture
    parameters request the two mocks defined above by name.
    """
    if "tmp_path" in args:
        # The sentinel string "tmp_path" in the parametrization stands in for
        # the per-test temporary directory fixture.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 22 |
'''simple docstring'''
import math
def UpperCAmelCase_ ( __lowercase : int ) -> bool:
    """Return True if ``__lowercase`` is a perfect square.

    Uses exact integer arithmetic (``math.isqrt``) instead of the previous
    floating-point ``math.sqrt`` product, which loses precision for large
    inputs and raised ``ValueError`` on negative ones.
    """
    if __lowercase < 0:
        # Negative integers are never perfect squares.
        return False
    return math.isqrt(__lowercase) ** 2 == __lowercase
def UpperCAmelCase_ ( __lowercase : int ) -> bool:
    """Return True if ``__lowercase`` is a perfect square, found by binary search.

    Runs in O(log n) using only integer arithmetic. Non-positive bounds make
    negative inputs fall straight through to ``False``.

    BUG FIX: the previous body read an undefined name ``n`` and overwrote a
    single throwaway local instead of maintaining two search bounds.
    """
    left = 0
    right = __lowercase
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == __lowercase:
            return True
        elif mid**2 > __lowercase:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    # NOTE(review): the functions above currently carry no doctest examples,
    # so this is a no-op until examples are added to their docstrings.
    import doctest

    doctest.testmod()
| 22 | 1 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file ):
    """Convert ``tests/models/<model>/test_modeling_*.py`` into an importable module path.

    Raises ``ValueError`` when the path is not under ``tests/models`` or the
    file name is not of the form ``test_modeling_*.py``.

    BUG FIX: the previous body read the undefined name ``test_file`` (the
    parameter had a different, auto-generated name), bound every intermediate
    to the same throwaway local, and the def name did not match the
    ``get_module_path(...)`` call sites elsewhere in this file.
    """
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            F'''{test_file} instead.''' )
    test_fn = components[-1]
    if not test_fn.endswith('''py''' ):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
    if not test_fn.startswith('''test_modeling_''' ):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
    components = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
    test_module_path = '''.'''.join(components )
    return test_module_path
def get_test_module(test_file ):
    """Import and return the test module corresponding to ``test_file``.

    BUG FIX: the previous version imported the raw file path instead of the
    module path it had just computed (both were bound to the same throwaway
    local), and its def name did not match the ``get_test_module(...)`` call
    sites elsewhere in this file.
    """
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes(test_file ):
    """Collect all ``*ModelTester`` classes defined in ``test_file``'s module, sorted by name.

    BUG FIX: the sort key lambda previously referenced an undefined name, and
    the locals/def name were mangled away from their uses.
    """
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('''ModelTester''' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes(test_file ):
    """Collect all test classes in ``test_file``'s module that declare a non-empty
    ``all_model_classes`` attribute, sorted by name.

    BUG FIX: the attribute lookup and the sort key lambda previously referenced
    undefined (mangled) names.
    """
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , '''all_model_classes''' , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes(test_file ):
    """Return the union of all model classes exercised by ``test_file``, sorted by name.

    BUG FIX: the sort key lambda previously referenced an undefined name.
    """
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class(test_class ):
    """Instantiate ``test_class``, run its ``setUp`` if any, and return the class
    of its ``model_tester`` attribute (or ``None`` when absent/unset).

    BUG FIX: the previous body bound the instance to a throwaway local and then
    read the undefined name ``test``.
    """
    test = test_class()
    if hasattr(test , '''setUp''' ):
        test.setUp()
    model_tester = None
    if hasattr(test , '''model_tester''' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file , model_class ):
    """Return the test classes in ``test_file`` whose ``all_model_classes``
    contains ``model_class``, sorted by name.

    BUG FIX: the sort key lambda previously referenced an undefined name.
    """
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model(test_file , model_class ):
    """Return the model-tester classes backing the test classes that exercise
    ``model_class`` in ``test_file``, sorted by name.

    BUG FIX: the sort key lambda previously referenced an undefined name.
    """
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping(test_file ):
    """Map each test class in ``test_file`` to its model-tester class (or ``None``).

    BUG FIX: intermediates were previously bound to one throwaway local and the
    def name did not match the rest of this module's naming.
    """
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file ):
    """Map each model class exercised by ``test_file`` to the test classes that cover it."""
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file ):
    """Map each model class exercised by ``test_file`` to its model-tester classes."""
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o ):
    """Recursively replace classes in ``o`` with their names so the structure is
    JSON-serializable; strings and other scalars pass through unchanged.

    BUG FIX: every ``isinstance`` check previously compared the argument against
    itself (the type arguments were mangled away), the list branch converted the
    whole container once per element, and the recursive calls targeted a def
    name that no longer existed.
    """
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
| 213 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( snake_case__ , unittest.TestCase ):
    """Fast tests for ``UnCLIPImageVariationPipeline`` on tiny dummy components.

    NOTE(review): this class is heavily damaged by automatic renaming and will
    not run as-is:
    - every class attribute is ``__magic_name__`` and every method is ``a_``,
      so later definitions shadow earlier ones; upstream names
      (``pipeline_class``, ``params``, ``batch_params``, the ``dummy_*`` /
      ``test_*`` members) must be restored -- confirm against the upstream
      diffusers test file.
    - bodies bind results to the throwaway local ``snake_case`` and then read
      undefined names (``tokenizer``, ``model``, ``pipe``, ``image``, ...), and
      one signature below repeats a parameter name (a SyntaxError).
    """

    __magic_name__ = UnCLIPImageVariationPipeline
    __magic_name__ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    __magic_name__ = IMAGE_VARIATION_BATCH_PARAMS
    __magic_name__ = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    __magic_name__ = False

    @property
    def a_ ( self ):
        # Dummy text-embedder hidden size.
        return 3_2

    @property
    def a_ ( self ):
        # Dummy time-embedding input dim.
        return 3_2

    @property
    def a_ ( self ):
        return self.time_input_dim

    @property
    def a_ ( self ):
        return self.time_input_dim * 4

    @property
    def a_ ( self ):
        # Dummy cross-attention dim.
        return 1_0_0

    @property
    def a_ ( self ):
        # NOTE(review): result is bound to ``snake_case`` but ``tokenizer`` is
        # returned -- an undefined name until upstream locals are restored.
        snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def a_ ( self ):
        torch.manual_seed(0 )
        snake_case = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModelWithProjection(__snake_case )

    @property
    def a_ ( self ):
        torch.manual_seed(0 )
        snake_case = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
        return CLIPVisionModelWithProjection(__snake_case )

    @property
    def a_ ( self ):
        torch.manual_seed(0 )
        snake_case = {
            '''clip_embeddings_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''cross_attention_dim''': self.cross_attention_dim,
        }
        snake_case = UnCLIPTextProjModel(**__snake_case )
        return model

    @property
    def a_ ( self ):
        torch.manual_seed(0 )
        snake_case = {
            '''sample_size''': 3_2,
            # RGB in channels
            '''in_channels''': 3,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 6,
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': '''identity''',
        }
        snake_case = UNetaDConditionModel(**__snake_case )
        return model

    @property
    def a_ ( self ):
        # kwargs shared by the two super-resolution UNets below.
        return {
            "sample_size": 6_4,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def a_ ( self ):
        torch.manual_seed(0 )
        snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    @property
    def a_ ( self ):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1 )
        snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    def a_ ( self ):
        # Assemble all dummy components for pipeline construction.
        snake_case = self.dummy_decoder
        snake_case = self.dummy_text_proj
        snake_case = self.dummy_text_encoder
        snake_case = self.dummy_tokenizer
        snake_case = self.dummy_super_res_first
        snake_case = self.dummy_super_res_last
        snake_case = UnCLIPScheduler(
            variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
        snake_case = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
        snake_case = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
        snake_case = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    # NOTE(review): the signature below repeats the parameter name
    # ``__snake_case`` -- a SyntaxError; upstream is
    # ``get_dummy_inputs(self, device, seed=0, pil_image=True)`` -- confirm.
    def a_ ( self , __snake_case , __snake_case=0 , __snake_case=True ):
        snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
        if str(__snake_case ).startswith('''mps''' ):
            snake_case = torch.manual_seed(__snake_case )
        else:
            snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
        if pil_image:
            snake_case = input_image * 0.5 + 0.5
            snake_case = input_image.clamp(0 , 1 )
            snake_case = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            snake_case = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def a_ ( self ):
        # End-to-end run with PIL input: compare a pixel slice to reference values.
        snake_case = '''cpu'''
        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**__snake_case )
        snake_case = pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
        snake_case = pipe(**__snake_case )
        snake_case = output.images
        snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
        snake_case = pipe(
            **__snake_case , return_dict=__snake_case , )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def a_ ( self ):
        # Same end-to-end run with tensor (non-PIL) input.
        snake_case = '''cpu'''
        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**__snake_case )
        snake_case = pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
        snake_case = pipe(**__snake_case )
        snake_case = output.images
        snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
        snake_case = pipe(
            **__snake_case , return_dict=__snake_case , )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def a_ ( self ):
        # Batched run: two copies of the image in one call.
        snake_case = '''cpu'''
        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**__snake_case )
        snake_case = pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
        snake_case = [
            pipeline_inputs['''image'''],
            pipeline_inputs['''image'''],
        ]
        snake_case = pipe(**__snake_case )
        snake_case = output.images
        snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
        snake_case = [
            tuple_pipeline_inputs['''image'''],
            tuple_pipeline_inputs['''image'''],
        ]
        snake_case = pipe(
            **__snake_case , return_dict=__snake_case , )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 6_4, 6_4, 3)
        snake_case = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def a_ ( self ):
        # Passing precomputed image embeddings must match passing the image.
        snake_case = torch.device('''cpu''' )

        # Local scheduler stand-in; only an ``init_noise_sigma``-like constant
        # is needed by ``prepare_latents`` (see ``DummyScheduler()`` below).
        class A__ :
            """simple docstring"""

            __magic_name__ = 1

        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**__snake_case )
        snake_case = pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = torch.Generator(device=__snake_case ).manual_seed(0 )
        snake_case = pipe.decoder.dtype
        snake_case = 1
        snake_case = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        snake_case = pipe.prepare_latents(
            __snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
        snake_case = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        snake_case = pipe.prepare_latents(
            __snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
        snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
        snake_case = pipe(
            **__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case ).images
        snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
        # Don't pass image, instead pass embedding
        snake_case = pipeline_inputs.pop('''image''' )
        snake_case = pipe.image_encoder(__snake_case ).image_embeds
        snake_case = pipe(
            **__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case , image_embeddings=__snake_case , ).images
        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a ).max() < 1E-4

    @skip_mps
    def a_ ( self ):
        snake_case = torch_device == '''cpu'''
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        snake_case = 1E-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=__snake_case , expected_max_diff=__snake_case )

    @skip_mps
    def a_ ( self ):
        snake_case = torch_device == '''cpu'''
        snake_case = True
        snake_case = [
            '''decoder_num_inference_steps''',
            '''super_res_num_inference_steps''',
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=__snake_case , relax_max_difference=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )

    def a_ ( self ):
        snake_case = [
            '''decoder_num_inference_steps''',
            '''super_res_num_inference_steps''',
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            snake_case = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=__snake_case )

    @skip_mps
    def a_ ( self ):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def a_ ( self ):
        return super().test_save_load_local()

    @skip_mps
    def a_ ( self ):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration test: run the published Karlo image-variation
    checkpoint on a real image and compare against a stored reference.

    NOTE(review): both methods share the auto-generated name ``a_`` (the second
    shadows the first); the first calls ``super().tearDown()`` so its real name
    is ``tearDown``, and the second needs a ``test_`` prefix for discovery.
    Bodies also bind to the throwaway local ``snake_case`` and then read
    undefined names -- restore the upstream locals before running.
    """

    def a_ ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a_ ( self ):
        # Input image and expected fp16 output come from the HF test fixtures repo.
        snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
        snake_case = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
        snake_case = UnCLIPImageVariationPipeline.from_pretrained(
            '''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
        snake_case = pipeline.to(__snake_case )
        pipeline.set_progress_bar_config(disable=__snake_case )
        snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
        snake_case = pipeline(
            __snake_case , generator=__snake_case , output_type='''np''' , )
        snake_case = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert_mean_pixel_difference(__snake_case , __snake_case , 1_5 )
| 213 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
A__ : Optional[Any] =logging.get_logger(__name__)
A__ : int ={
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase ( snake_case_ ):
    """Configuration for GPT-J models.

    Stores the architecture hyper-parameters (vocab size, context length,
    hidden size, layer/head counts, rotary dim, dropouts, ...) on the instance
    so the ``PretrainedConfig`` machinery can serialize them.

    BUG FIXES relative to the previous version: ``__init__`` repeated one
    (name-mangled) parameter name for every argument -- a SyntaxError -- and
    bound every value to a throwaway local instead of ``self``; the two class
    attributes were both named ``_lowercase`` (the dict shadowed the string),
    whereas ``PretrainedConfig`` reads them as ``model_type`` and
    ``attribute_map``.
    """

    model_type = "gptj"
    # Translate the generic config attribute names to GPT-J's historical ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,          # defaults to 4 * n_embd inside the model when None
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Token ids and embedding tying are handled by the base class.
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class UpperCAmelCase ( OnnxConfigWithPast ):
    """ONNX export configuration for GPT-J with optional past-key-values support.

    NOTE(review): this class re-uses the name `UpperCAmelCase` and therefore
    shadows the config class of the same name defined above — upstream these are
    `GPTJConfig` / `GPTJOnnxConfig`; renaming is recommended. The original block
    was also garbled: the base class name was undefined, four members shared the
    name `lowercase__` (later defs shadowed earlier ones), and locals were bound
    to `_lowerCAmelCase` but read under other names. Restored to the upstream
    GPTJOnnxConfig shape.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported inputs; the attention mask covers
        past + current tokens when past-key-values are exported."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for ONNX export, appending zeroed past-key-values
        (and a widened attention mask) when ``use_past`` is enabled."""
        # Skip OnnxConfigWithPast's own override: we append the past tensors here.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask so past positions are attended to as well.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 70 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A :
    """Test helper that builds a tiny random Llama config and matching inputs.

    NOTE(review): this block carries mechanical renaming damage — the
    ``__init__`` parameters are all literally named `lowercase_` (duplicate
    argument names are a SyntaxError), locals are bound to `_lowerCamelCase`
    but read back under their upstream names, and every method is named
    `lowerCamelCase`, so only the last definition survives. The upstream
    identifiers (LlamaModelTester and its method names) should be restored;
    this edit adds documentation only.
    """

    def __init__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict=13 , lowercase_ : List[Any]=7 , lowercase_ : List[Any]=True , lowercase_ : Any=True , lowercase_ : Optional[int]=False , lowercase_ : Tuple=True , lowercase_ : List[str]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[Any]=5 , lowercase_ : Dict=4 , lowercase_ : List[Any]=37 , lowercase_ : Any="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Any=16 , lowercase_ : Optional[int]=2 , lowercase_ : Any=0.02 , lowercase_ : Dict=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : Tuple=None , ) -> List[Any]:
        """Record the tester hyper-parameters (batch/model dims, label counts)."""
        _lowerCamelCase : List[Any] =parent
        _lowerCamelCase : Tuple =batch_size
        _lowerCamelCase : Any =seq_length
        _lowerCamelCase : int =is_training
        _lowerCamelCase : int =use_input_mask
        _lowerCamelCase : List[str] =use_token_type_ids
        _lowerCamelCase : Dict =use_labels
        _lowerCamelCase : int =vocab_size
        _lowerCamelCase : Optional[Any] =hidden_size
        _lowerCamelCase : Union[str, Any] =num_hidden_layers
        _lowerCamelCase : Any =num_attention_heads
        _lowerCamelCase : Tuple =intermediate_size
        _lowerCamelCase : List[str] =hidden_act
        _lowerCamelCase : int =hidden_dropout_prob
        _lowerCamelCase : Optional[int] =attention_probs_dropout_prob
        _lowerCamelCase : Any =max_position_embeddings
        _lowerCamelCase : Optional[Any] =type_vocab_size
        _lowerCamelCase : List[Any] =type_sequence_label_size
        _lowerCamelCase : Union[str, Any] =initializer_range
        _lowerCamelCase : Dict =num_labels
        _lowerCamelCase : Optional[Any] =num_choices
        _lowerCamelCase : Dict =scope

    def lowerCamelCase ( self : Dict ) -> int:
        """Create random input ids/masks/labels plus a config for one test run."""
        _lowerCamelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCamelCase : Union[str, Any] =None
        if self.use_input_mask:
            _lowerCamelCase : Dict =random_attention_mask([self.batch_size, self.seq_length] )
        _lowerCamelCase : str =None
        if self.use_token_type_ids:
            _lowerCamelCase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowerCamelCase : Tuple =None
        _lowerCamelCase : Optional[Any] =None
        _lowerCamelCase : Optional[Any] =None
        if self.use_labels:
            _lowerCamelCase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCamelCase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCamelCase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
        _lowerCamelCase : Optional[Any] =self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCamelCase ( self : int ) -> Tuple:
        """Build a LlamaConfig mirroring the tester hyper-parameters."""
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )

    def lowerCamelCase ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] ) -> Optional[int]:
        """Run LlamaModel forward (with/without mask) and check output shape."""
        _lowerCamelCase : Dict =LlamaModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _lowerCamelCase : List[str] =model(lowercase_ , attention_mask=lowercase_ )
        _lowerCamelCase : List[str] =model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase ( self : int , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , ) -> Any:
        """Run LlamaModel as a decoder with cross-attention inputs."""
        _lowerCamelCase : str =True
        _lowerCamelCase : str =LlamaModel(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _lowerCamelCase : int =model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
        _lowerCamelCase : str =model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
        _lowerCamelCase : List[Any] =model(lowercase_ , attention_mask=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Any , ) -> Optional[int]:
        """Run LlamaForCausalLM with labels and check the logits shape."""
        _lowerCamelCase : int =LlamaForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _lowerCamelCase : int =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCamelCase ( self : int , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , ) -> List[str]:
        """Check that cached (past_key_values) decoding matches a full forward pass."""
        _lowerCamelCase : List[str] =True
        _lowerCamelCase : Tuple =True
        _lowerCamelCase : str =LlamaForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()

        # first forward pass
        _lowerCamelCase : int =model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
        _lowerCamelCase : Any =outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        _lowerCamelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
        _lowerCamelCase : Optional[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )

        # append to next input_ids and
        _lowerCamelCase : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 )
        _lowerCamelCase : Optional[Any] =torch.cat([input_mask, next_mask] , dim=-1 )
        _lowerCamelCase : Optional[int] =model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
        _lowerCamelCase : Tuple =model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]

        # select random slice
        _lowerCamelCase : int =ids_tensor((1,) , output_from_past.shape[-1] ).item()
        _lowerCamelCase : Optional[Any] =output_from_no_past[:, -3:, random_slice_idx].detach()
        _lowerCamelCase : Optional[int] =output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )

    def lowerCamelCase ( self : Tuple ) -> str:
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        _lowerCamelCase : int =self.prepare_config_and_inputs()
        # NOTE(review): this unpack binds the same name seven times, so the
        # upstream names (config, input_ids, ...) referenced below are undefined.
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Optional[Any] =config_and_inputs
        _lowerCamelCase : Union[str, Any] ={'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Common model/generation/pipeline test suite for the Llama family.

    NOTE(review): mechanical renaming damage — all five class attributes are
    named `UpperCamelCase__` (later assignments shadow earlier ones; upstream
    these are all_model_classes, all_generative_model_classes,
    pipeline_model_mapping, test_headmasking, test_pruning) and every test
    method is named `lowerCamelCase`, so only the last definition survives.
    Documentation only; code unchanged.
    """
    UpperCamelCase__ : Optional[Any] =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    UpperCamelCase__ : Union[str, Any] =(LlamaForCausalLM,) if is_torch_available() else ()
    UpperCamelCase__ : List[Any] =(
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ : Optional[Any] =False
    UpperCamelCase__ : Tuple =False

    def lowerCamelCase ( self : Union[str, Any] ) -> str:
        """Set up the model tester and the shared config tester."""
        _lowerCamelCase : Optional[Any] =LlamaModelTester(self )
        _lowerCamelCase : Any =ConfigTester(self , config_class=lowercase_ , hidden_size=37 )

    def lowerCamelCase ( self : List[str] ) -> Tuple:
        """Run the common config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCamelCase ( self : Dict ) -> str:
        """Smoke-test the base model forward pass."""
        _lowerCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def lowerCamelCase ( self : int ) -> Tuple:
        """Exercise the model under each position-embedding type."""
        _lowerCamelCase : List[str] =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowerCamelCase : Tuple =type
            self.model_tester.create_and_check_model(*lowercase_ )

    def lowerCamelCase ( self : Tuple ) -> List[str]:
        """Sequence classification head: regression-style labels."""
        _lowerCamelCase , _lowerCamelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : str =3
        _lowerCamelCase : str =input_dict['input_ids']
        _lowerCamelCase : int =input_ids.ne(1 ).to(lowercase_ )
        _lowerCamelCase : Optional[Any] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        _lowerCamelCase : Union[str, Any] =LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _lowerCamelCase : Dict =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCamelCase ( self : Tuple ) -> Optional[Any]:
        """Sequence classification head: single-label problem type."""
        _lowerCamelCase , _lowerCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : Optional[Any] =3
        _lowerCamelCase : List[Any] ='single_label_classification'
        _lowerCamelCase : List[str] =input_dict['input_ids']
        _lowerCamelCase : Any =input_ids.ne(1 ).to(lowercase_ )
        _lowerCamelCase : List[str] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        _lowerCamelCase : str =LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _lowerCamelCase : Optional[int] =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCamelCase ( self : Dict ) -> int:
        """Sequence classification head: multi-label problem type (float labels)."""
        _lowerCamelCase , _lowerCamelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : int =3
        _lowerCamelCase : Optional[Any] ='multi_label_classification'
        _lowerCamelCase : str =input_dict['input_ids']
        _lowerCamelCase : Tuple =input_ids.ne(1 ).to(lowercase_ )
        _lowerCamelCase : Tuple =ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        _lowerCamelCase : Optional[Any] =LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _lowerCamelCase : Optional[Any] =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def lowerCamelCase ( self : int ) -> str:
        """Intentionally skipped (complex-valued buffers)."""
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def lowerCamelCase ( self : Optional[int] , lowercase_ : List[Any] ) -> int:
        """Compare original vs RoPE-scaled models on short and long inputs."""
        _lowerCamelCase , _lowerCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : Union[str, Any] =ids_tensor([1, 10] , config.vocab_size )
        _lowerCamelCase : List[str] =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        _lowerCamelCase : Optional[int] =LlamaModel(lowercase_ )
        original_model.to(lowercase_ )
        original_model.eval()
        _lowerCamelCase : List[Any] =original_model(lowercase_ ).last_hidden_state
        _lowerCamelCase : Dict =original_model(lowercase_ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        _lowerCamelCase : Dict ={'type': scaling_type, 'factor': 10.0}
        _lowerCamelCase : int =LlamaModel(lowercase_ )
        scaled_model.to(lowercase_ )
        scaled_model.eval()
        _lowerCamelCase : Optional[int] =scaled_model(lowercase_ ).last_hidden_state
        _lowerCamelCase : Dict =scaled_model(lowercase_ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
@require_torch
class A ( unittest.TestCase ):
    """Slow integration tests comparing Llama-2 checkpoint logits to reference values.

    NOTE(review): mechanical renaming damage — every test method is named
    `lowerCamelCase` (later definitions shadow earlier ones), and results are
    bound to `_lowerCamelCase` but asserted via names like `out`, `model`,
    `input_ids` and `lowercase_` that are undefined here. Upstream identifiers
    should be restored; documentation only.
    """

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
        """Llama-2 7B: mean logits and a 30-element logit slice vs reference."""
        _lowerCamelCase : List[str] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
        _lowerCamelCase : Optional[Any] =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
        _lowerCamelCase : int =model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        _lowerCamelCase : int =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        _lowerCamelCase : Dict =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def lowerCamelCase ( self : int ) -> Any:
        """Llama-2 13B: mean logits and a 30-element logit slice vs reference."""
        _lowerCamelCase : Tuple =[1, 306, 4658, 278, 6593, 310, 2834, 338]
        _lowerCamelCase : Optional[Any] =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
        _lowerCamelCase : List[Any] =model(torch.tensor(lowercase_ ) )
        # Expected mean on dim = -1
        _lowerCamelCase : str =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        _lowerCamelCase : List[str] =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def lowerCamelCase ( self : List[Any] ) -> Any:
        """Llama-2 13B chat: mean logits and a logit slice vs reference."""
        _lowerCamelCase : Optional[Any] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
        _lowerCamelCase : str =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
        _lowerCamelCase : int =model(torch.tensor(lowercase_ ) )
        # Expected mean on dim = -1
        _lowerCamelCase : str =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        _lowerCamelCase : Union[str, Any] =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )

    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
    @slow
    def lowerCamelCase ( self : int ) -> List[Any]:
        """Llama-2 70B: mean logits and a logit slice vs reference."""
        _lowerCamelCase : List[str] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
        _lowerCamelCase : Any =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
        _lowerCamelCase : Optional[Any] =model(torch.tensor(lowercase_ ) )
        _lowerCamelCase : Optional[int] =torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
        # fmt: off
        _lowerCamelCase : int =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Model is curently gated' )
    @slow
    def lowerCamelCase ( self : str ) -> Optional[int]:
        """Greedy-generation round trip against a fixed expected completion."""
        _lowerCamelCase : Tuple ='Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        _lowerCamelCase : Union[str, Any] ='Simply put, the theory of relativity states that '
        _lowerCamelCase : int =LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        _lowerCamelCase : str =tokenizer.encode(lowercase_ , return_tensors='pt' )
        _lowerCamelCase : List[Any] =LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowercase_ )
        # greedy generation outputs
        _lowerCamelCase : str =model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ )
        _lowerCamelCase : Tuple =tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ )
        self.assertEqual(lowercase_ , lowercase_ )
| 199 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class __magic_name__ ( PretrainedConfig ):
    """Transformer-XL model configuration (``model_type`` "transfo-xl").

    Defaults match the transfo-xl-wt103 checkpoint.

    NOTE(review): the original block was mechanically garbled — every
    ``__init__`` parameter was literally named `lowercase_` (duplicate argument
    names are a SyntaxError), the body assigned locals instead of `self.*`
    attributes, the three class attributes shared one name, and the
    `@max_position_embeddings.setter` referenced a property that had been
    renamed away. Restored to the upstream TransfoXLConfig shape. The
    module-level logger is bound to `_lowercase` in this file — `logger` below
    assumes the upstream binding; confirm/restore it.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # upstream uses a mutable default; it is copied below, never mutated
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        """Store the Transformer-XL hyper-parameters; the adaptive-softmax
        cutoffs are copied and the projection-tying pattern derived from
        ``proj_share_all_but_first``."""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            # Tie every projection except the first cluster's.
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
| 350 | '''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    """Convert a TensorFlow Funnel checkpoint into a PyTorch ``state_dict`` file.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint to read.
        config_file: JSON config describing the pre-trained model architecture.
        pytorch_dump_path: Where to write the converted PyTorch weights.
        base_model: If truthy, build ``FunnelBaseModel`` (no decoder) instead of
            ``FunnelModel``.

    NOTE(review): the original defined this function as `lowerCamelCase` while
    the ``__main__`` block below calls `convert_tf_checkpoint_to_pytorch`
    (NameError), and its locals were all bound to `lowercase_`; restored.
    """
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/output paths and run the conversion.
    # NOTE(review): the original bound the parser/args to `_lowercase` while the
    # following lines read `parser` and `args` (NameError); restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 21 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=128 , _A=32 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
__A : Any = parent
__A : List[Any] = batch_size
__A : List[Any] = seq_length
__A : str = is_training
__A : Tuple = use_input_mask
__A : List[Any] = use_token_type_ids
__A : List[str] = use_labels
__A : Any = vocab_size
__A : List[Any] = hidden_size
__A : Optional[Any] = num_hidden_layers
__A : List[str] = num_attention_heads
__A : int = intermediate_size
__A : Optional[Any] = hidden_act
__A : Tuple = hidden_dropout_prob
__A : List[str] = attention_probs_dropout_prob
__A : Dict = max_position_embeddings
__A : Tuple = type_vocab_size
__A : List[Any] = type_sequence_label_size
__A : Dict = initializer_range
__A : List[Any] = num_labels
__A : List[Any] = num_choices
__A : List[str] = scope
def UpperCAmelCase_ ( self ):
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : List[Any] = None
if self.use_input_mask:
__A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__A : int = None
if self.use_token_type_ids:
__A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Union[str, Any] = None
__A : List[str] = None
__A : List[Any] = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : Any = ids_tensor([self.batch_size] , self.num_choices )
__A : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self ):
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : int = self.prepare_config_and_inputs()
__A : List[str] = True
__A : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : Tuple = NezhaModel(config=_A )
model.to(_A )
model.eval()
__A : Dict = model(_A , attention_mask=_A , token_type_ids=_A )
__A : str = model(_A , token_type_ids=_A )
__A : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Optional[int] = True
__A : List[str] = NezhaModel(_A )
model.to(_A )
model.eval()
__A : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A : str = model(
_A , attention_mask=_A , token_type_ids=_A , encoder_hidden_states=_A , )
__A : Dict = model(_A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : Optional[int] = NezhaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__A : int = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : List[Any] = NezhaForNextSentencePrediction(config=_A )
model.to(_A )
model.eval()
__A : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : Dict = NezhaForPreTraining(config=_A )
model.to(_A )
model.eval()
__A : int = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , next_sentence_label=_A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : int = NezhaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__A : str = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : Optional[int] = self.num_labels
__A : str = NezhaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : str = self.num_labels
__A : Tuple = NezhaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__A : int = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : Optional[Any] = self.num_choices
__A : Tuple = NezhaForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__A : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Tuple = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : Optional[Any] = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
    """Common-API test suite for the Nezha model family.

    NOTE(review): kept byte-for-byte from an obfuscated original that is badly
    damaged: every test method shares the name ``UpperCAmelCase_`` (later defs
    shadow earlier ones, and none is a ``test_*`` name unittest would
    discover); several methods declare the parameter ``_A`` more than once,
    which is a SyntaxError in Python; and ``_A`` / ``snake_case__`` are used
    as values without a visible definition in this chunk. A confident
    behavior-preserving rewrite is not possible from this view alone, so only
    comments are added.
    """

    # NOTE(review): three class attributes all named ``UpperCamelCase`` — the
    # loop below uses ``self.all_model_classes``, so the first is presumably
    # all_model_classes, the second a pipeline mapping, the third a boolean
    # mixin flag; only the last binding survives as written. Confirm against
    # the mixin API.
    UpperCamelCase : List[Any] = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    UpperCamelCase : Union[str, Any] = (
        {
            '''feature-extraction''': NezhaModel,
            '''fill-mask''': NezhaForMaskedLM,
            '''question-answering''': NezhaForQuestionAnswering,
            '''text-classification''': NezhaForSequenceClassification,
            '''token-classification''': NezhaForTokenClassification,
            '''zero-shot''': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase : Union[str, Any] = True

    # Adds dummy labels to inputs_dict when return_labels is requested.
    # NOTE(review): duplicate ``_A`` parameters are a SyntaxError, and the two
    # torch.zeros results are bound to a local instead of inputs_dict keys
    # (presumably "labels" / "next_sentence_label") — broken as written.
    def UpperCAmelCase_ ( self , _A , _A , _A=False ):
        __A : Tuple = super()._prepare_for_class(_A , _A , return_labels=_A )
        if return_labels:
            if model_class in get_values(_A ):
                __A : Dict = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A )
                __A : Dict = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_A )
        return inputs_dict

    # setUp equivalent: builds the model tester and the config tester.
    def UpperCAmelCase_ ( self ):
        __A : Tuple = NezhaModelTester(self )
        __A : Any = ConfigTester(self , config_class=_A , hidden_size=37 )

    # test_config equivalent.
    def UpperCAmelCase_ ( self ):
        self.config_tester.run_common_tests()

    # test_model equivalent.
    def UpperCAmelCase_ ( self ):
        __A : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    # test_model_as_decoder equivalent.
    def UpperCAmelCase_ ( self ):
        __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*_A )

    # Decoder test with the attention mask defaulted to None.
    def UpperCAmelCase_ ( self ):
        # This regression test was failing with PyTorch < 1.3
        (
            (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) ,
        ) : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
        __A : Optional[Any] = None
        self.model_tester.create_and_check_model_as_decoder(
            _A , _A , _A , _A , _A , _A , _A , _A , _A , )

    def UpperCAmelCase_ ( self ):
        __A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_A )

    def UpperCAmelCase_ ( self ):
        __A : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_A )

    def UpperCAmelCase_ ( self ):
        __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*_A )

    def UpperCAmelCase_ ( self ):
        __A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_A )

    def UpperCAmelCase_ ( self ):
        __A : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_A )

    def UpperCAmelCase_ ( self ):
        __A : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_A )

    def UpperCAmelCase_ ( self ):
        __A : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_A )

    # Smoke-test loading a released checkpoint.
    @slow
    def UpperCAmelCase_ ( self ):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A : Dict = NezhaModel.from_pretrained(_A )
            self.assertIsNotNone(_A )

    # TorchScript trace/save/load round-trip on GPU.
    @slow
    @require_torch_gpu
    def UpperCAmelCase_ ( self ):
        __A , __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            __A : int = True
            __A : List[str] = model_class(config=_A )
            __A : Dict = self._prepare_for_class(_A , _A )
            __A : Optional[Any] = torch.jit.trace(
                _A , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(_A , os.path.join(_A , 'bert.pt' ) )
                __A : Optional[Any] = torch.jit.load(os.path.join(_A , 'bert.pt' ) , map_location=_A )
                loaded(inputs_dict['input_ids'].to(_A ) , inputs_dict['attention_mask'].to(_A ) )
@require_torch
class _A(unittest.TestCase):
    """Slow integration tests exercising the real `sijunhe/nezha-cn-base` weights.

    NOTE(review): reconstructed from an obfuscated original whose two test
    methods shared one non-``test_*`` name (so unittest never discovered
    them); method names follow the standard HF integration-test convention.
    This class reuses the name ``_A`` of the suite above it — the pre-existing
    collision is kept to avoid breaking external references.
    """

    @slow
    def test_inference_nezha_model(self):
        """Base-model forward pass matches recorded hidden-state values."""
        model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        """Masked-LM head forward pass matches recorded logit values."""
        model = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 280 |
def is_palindrome(a) -> bool:
    """Return True if the decimal representation of ``a`` is a palindrome.

    NOTE(review): in the obfuscated original all three functions here shared
    the name ``_SCREAMING_SNAKE_CASE``; the names are restored from the call
    sites inside ``solution`` and the ``__main__`` guard below.
    """
    return str(a) == str(a)[::-1]


def sum_reverse(a) -> int:
    """Return ``a`` plus its digit-reversed value (one reverse-and-add step)."""
    return int(a) + int(str(a)[::-1])


def solution(a=1_00_00) -> int:
    """Count Lychrel candidates below ``a`` (Project Euler 55).

    A number is counted when 50 reverse-and-add iterations never produce a
    palindrome. Returns the count of such numbers in ``[1, a)``.
    """
    lychrel_nums = []
    for num in range(1, a):
        iterations = 0
        value = num
        while iterations < 50:
            value = sum_reverse(value)
            iterations += 1
            if is_palindrome(value):
                break
        # while/else: runs only when the 50-iteration budget is exhausted
        # without ever reaching a palindrome.
        else:
            lychrel_nums.append(value)
    return len(lychrel_nums)
if __name__ == "__main__":
    # Script entry point: print the Project Euler 55 answer for the default limit.
    print(F"""{solution() = }""")
| 280 | 1 |
"""simple docstring"""
from copy import deepcopy
class lowerCamelCase__ :
    """Fenwick (binary indexed) tree with point updates and prefix-sum queries.

    Index 0 is stored separately in ``tree[0]``; indices 1..size-1 use the
    classic Fenwick layout.

    NOTE(review): in the obfuscated original every method was named
    ``_UpperCamelCase`` and every attribute write went to a throwaway local,
    so the class could not work at all. Method and attribute names are
    restored from the internal call sites (``self.init``, ``self.next_``,
    ``self.prev``, ``self.add``, ``self.get``, ``self.prefix``,
    ``self.query``); ``get_array`` and ``rank_query`` had no internal callers
    and follow the conventional Fenwick-tree API — confirm against external
    callers.
    """

    def __init__(self, arr=None, size=None):
        """Create the tree from ``arr``, or zero-initialised with ``size`` slots."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("""Either arr or size must be specified""")

    def init(self, arr):
        """Build the tree from ``arr`` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Recover the original array in O(n) by undoing the build step."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Parent index when propagating an update upward (add lowest set bit).
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Previous index when accumulating a prefix sum (drop lowest set bit).
        return index - (index & (-index))

    def add(self, index, value):
        """Add ``value`` to the element at ``index`` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set the element at ``index`` to ``value`` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return the sum of elements in ``[0, right)`` in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return the sum of elements in ``[left, right)``."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return the value of the single element at ``index``."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Return the largest index whose prefix sum is <= ``value`` (-1 if none)."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
    # Run any embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 357 |
"""simple docstring"""
_UpperCamelCase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
assert len(str(_snake_case ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
UpperCAmelCase = year // 100
UpperCAmelCase = (5 * (century % 4) + 2) % 7
UpperCAmelCase = year % 100
UpperCAmelCase = centurian % 12
UpperCAmelCase = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCAmelCase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCAmelCase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    # Run any embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 234 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class A_(unittest.TestCase):
    """Unit tests for the text-generation stopping-criteria utilities.

    NOTE(review): reconstructed from an obfuscated original in which every
    method shared one non-``test_*`` name and the tensor locals were collapsed
    to undefined symbols; ``test_*`` names follow the standard transformers
    test file for these criteria so unittest can discover them.
    """

    def _get_tensors(self, length):
        """Fabricate an ``(input_ids, scores)`` pair of the requested length."""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        """A list of criteria fires as soon as any member criterion fires."""
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        # Wrapping in a list exposes the aggregate max_length.
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        # Backdating the initial timestamp makes the budget already exhausted.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        # NOTE(review): validate_stopping_criteria issues a UserWarning when
        # the criteria's max_length disagrees with the requested one — confirm.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
| 333 |
'''simple docstring'''


def SCREAMING_SNAKE_CASE(list_data: list, length: int = 0) -> list:
    """Recursively bubble-sort ``list_data`` in place and return it.

    Each pass bubbles the largest remaining element to position ``length - 1``
    and recurses on the shorter prefix; an early return fires when a pass makes
    no swap.

    NOTE(review): the obfuscated original declared both parameters with the
    same name (a SyntaxError), referenced the undefined names ``list_data`` /
    ``length`` in the body, and recursed into an undefined ``bubble_sort`` —
    names restored from those body references and the recursion retargeted to
    this function.
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else SCREAMING_SNAKE_CASE(list_data, length - 1)
if __name__ == "__main__":
    # Run any embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 319 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger. NOTE(review): the obfuscated original bound this to a
# throwaway name while every function body below logs through ``logger``.
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """Copy weights from a TensorFlow 2.x BERT checkpoint into ``model``.

    Only handles the embedding/encoder/pooler layers (no MLM/NSP heads).
    Returns the mutated ``model``.

    NOTE(review): reconstructed from obfuscated code that declared three
    duplicate parameters (a SyntaxError) and collapsed every local to one
    name; the function name matches the call site in the converter below, and
    local names were restored from the dataflow. Confirm against the upstream
    HF conversion script.
    """
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('/')
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"""Skipping non-model layer {full_name}""")
            continue
        if "optimizer" in full_name:
            logger.info(f"""Skipping optimization layer {full_name}""")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('layer_with_weights'):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append('/'.join(name))
        arrays.append(array)
    logger.info(f"""Read a total of {len(arrays):,} layers""")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"""Found layer names with different depths (layer depth {list(set(layer_depth))})""")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
            ' heads.')

    # convert layers
    logger.info('Converting weights...')
    for full_name, array in zip(names, arrays):
        name = full_name.split('/')
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('layer_with_weights'):
                layer_num = int(m_name.split('-')[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['embeddings', 'LayerNorm'])
                    pointer = getattr(pointer, 'embeddings')
                    pointer = getattr(pointer, 'LayerNorm')
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['encoder', 'layer', str(layer_num - 4)])
                    pointer = getattr(pointer, 'encoder')
                    pointer = getattr(pointer, 'layer')
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['pooler', 'dense'])
                    pointer = getattr(pointer, 'pooler')
                    pointer = getattr(pointer, 'dense')
            elif m_name == "embeddings":
                trace.append('embeddings')
                pointer = getattr(pointer, 'embeddings')
                if layer_num == 0:
                    trace.append('word_embeddings')
                    pointer = getattr(pointer, 'word_embeddings')
                elif layer_num == 1:
                    trace.append('position_embeddings')
                    pointer = getattr(pointer, 'position_embeddings')
                elif layer_num == 2:
                    trace.append('token_type_embeddings')
                    pointer = getattr(pointer, 'token_type_embeddings')
                else:
                    raise ValueError(f"""Unknown embedding layer with name {full_name}""")
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['attention', 'self'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'self')
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['attention', 'output', 'LayerNorm'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['attention', 'output', 'dense'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['output', 'dense'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(['output', 'LayerNorm'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_key_dense":
                # attention key
                trace.append('key')
                pointer = getattr(pointer, 'key')
            elif m_name == "_query_dense":
                # attention query
                trace.append('query')
                pointer = getattr(pointer, 'query')
            elif m_name == "_value_dense":
                # attention value
                trace.append('value')
                pointer = getattr(pointer, 'value')
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['intermediate', 'dense'])
                pointer = getattr(pointer, 'intermediate')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append('output')
                pointer = getattr(pointer, 'output')
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('bias')
                pointer = getattr(pointer, 'bias')
            elif m_name in ["kernel", "gamma"]:
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            else:
                logger.warning(f"""Ignored {m_name}""")
        # for certain layers reshape is necessary
        trace = '.'.join(trace)
        if re.match(r'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)', trace) or re.match(
            r'(\S+)\.attention\.output\.dense\.weight', trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
                f""" {array.shape}""")
        logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Build a BertModel from ``config_path``, load TF 2.x weights, save a .pt file.

    NOTE(review): the obfuscated original declared three duplicate parameters
    (a SyntaxError) and shadowed the loader function's name; the function name
    and argument order are grounded by the ``__main__`` call site below.
    """
    logger.info(f"""Loading model based on config from {config_path}...""")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: convert a TF 2.x BERT checkpoint into a PyTorch file.
    # NOTE(review): the obfuscated original bound the parser and parsed args to
    # throwaway names while the following lines use ``parser`` / ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model (must include filename).',
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 94 |
__snake_case : Any ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : Tuple =[{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : Tuple ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 94 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
# Module logger. NOTE(review): in the obfuscated original this binding was
# immediately shadowed by the archive map below (both used one name).
logger = logging.get_logger(__name__)

# Checkpoint-name -> config URL map. NOTE(review): name follows the HF
# per-model convention; nothing in this chunk references it, so confirm
# against external consumers.
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class A(PretrainedConfig):
    """Configuration for DeBERTa-v2 models.

    NOTE(review): the obfuscated original inherited from an undefined symbol;
    ``PretrainedConfig`` (imported at the top of this file) is the only
    importable base matching the ``super().__init__(**kwargs)`` /
    ``model_type`` contract. The constructor declared nineteen parameters all
    named ``lowercase_`` (a SyntaxError); names are restored from the body's
    attribute writes and the visible defaults. The next class in this module
    is also named ``A`` and shadows this one at module scope — pre-existing
    collision left in place.
    """

    model_type = 'deberta-v2'

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Pooler defaults to the model hidden size unless overridden via kwargs.
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class A(OnnxConfig):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): the obfuscated original inherited from an undefined symbol;
    ``OnnxConfig`` (imported at the top of this file) is the base whose API
    (``inputs``, ``default_onnx_opset``, ``generate_dummy_inputs``) these
    members implement; property/method names restored accordingly. The
    ``generate_dummy_inputs`` signature declared ten duplicate parameters (a
    SyntaxError); names restored from the ``OnnxConfig`` signature.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis map; token_type_ids only when the config uses them."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)]
            )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer=None,
    ) -> Mapping[str, Any]:
        """Delegate to the base generator, then drop unused token_type_ids."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 7 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: Optional[Any] = ["pixel_values"]
def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PIL.Image.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : str , ) -> None:
"""simple docstring"""
super().__init__(**_A )
snake_case_ : Dict = size if size is not None else {'height': 256, 'width': 256}
snake_case_ : Tuple = get_size_dict(_A )
snake_case_ : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
snake_case_ : int = get_size_dict(_A , param_name='crop_size' )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : str = size
snake_case_ : List[str] = resample
snake_case_ : List[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : Tuple = do_rescale
snake_case_ : Optional[Any] = rescale_factor
snake_case_ : Any = do_normalize
snake_case_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PIL.Image.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray:
"""simple docstring"""
snake_case_ : Tuple = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
_A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
snake_case_ : Optional[int] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )
def UpperCAmelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ) -> str:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def preprocess(
    self,
    images: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    resample=None,
    do_center_crop: bool = None,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
) -> PIL.Image.Image:
    """Run the full pipeline (resize -> center-crop -> rescale -> normalize) on one image or a batch.

    Any argument left as ``None`` falls back to the corresponding instance
    attribute set in ``__init__``.

    Fixes in this reconstruction:
    - the original's parameters were all named ``_A`` (a SyntaxError);
    - the resize validation ``do_resize and size is None or resample is None``
      was missing parentheses, so it raised whenever ``resample`` was ``None``
      even with ``do_resize=False``.

    Returns:
        A ``BatchFeature`` holding ``pixel_values`` in ``data_format`` layout,
        optionally converted to ``return_tensors``.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    size = size if size is not None else self.size
    size = get_size_dict(size)
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name='crop_size')
    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
            'torch.Tensor, tf.Tensor or jax.ndarray.')
    # Parenthesized: only complain about size/resample when resizing is requested.
    if do_resize and (size is None or resample is None):
        raise ValueError('Size and resample must be specified if do_resize is True.')
    if do_center_crop and crop_size is None:
        raise ValueError('Crop size must be specified if do_center_crop is True.')
    if do_rescale and rescale_factor is None:
        raise ValueError('Rescale factor must be specified if do_rescale is True.')
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError('Image mean and std must be specified if do_normalize is True.')
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
    images = [to_channel_dimension_format(image, data_format) for image in images]
    data = {'pixel_values': images}
    return BatchFeature(data=data, tensor_type=return_tensors)
| 327 | 0 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
A_ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
A_ = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
    """Tokenizer test suite for Camembert (slow SentencePiece + fast Rust tokenizer).

    NOTE(review): this class is machine-mangled. The four class attributes below
    were originally distinct (tokenizer_class, rust_tokenizer_class,
    test_rust_tokenizer, test_sentencepiece) but all got renamed to
    ``snake_case_``, so each assignment overwrites the previous one. Inside the
    methods, locals are bound to ``A__`` but consumed under their original names
    (``tokenizer``, ``rust_tokenizer``, ``vocab_keys``, ...), and ``snake_case``
    refers to module fixture constants whose names were destroyed (both were
    collapsed onto ``A_``). Kept byte-identical; only documentation added.
    """
    snake_case_ = CamembertTokenizer
    snake_case_ = CamembertTokenizerFast
    snake_case_ = True
    snake_case_ = True

    def _UpperCamelCase ( self : int ):
        """Build a slow Camembert tokenizer from the SentencePiece fixture and save it for the mixin."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): `snake_case` is undefined here (was the fixture path);
        # `tokenizer` below is undefined (the result was bound to `A__`).
        A__ : List[str] = CamembertTokenizer(snake_case )
        tokenizer.save_pretrained(self.tmpdirname )

    def _UpperCamelCase ( self : str ):
        """`<pad>` should round-trip to id 1 and back."""
        A__ : str = """<pad>"""
        A__ : Dict = 1
        # NOTE(review): `snake_case` stands in for the two locals above.
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )

    def _UpperCamelCase ( self : Tuple ):
        """First/second/last vocab entries and total vocab length match the fixture."""
        A__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(snake_case ) , 1004 )

    def _UpperCamelCase ( self : List[str] ):
        """Reported vocab_size of the fixture tokenizer."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1005 )

    def _UpperCamelCase ( self : Tuple ):
        """Slow and fast tokenizers built from the same files must agree on encode()."""
        A__ : Optional[int] = CamembertTokenizer(snake_case )
        tokenizer.save_pretrained(self.tmpdirname )
        A__ : Optional[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        A__ : Tuple = """I was born in 92000, and this is falsé."""
        A__ : Optional[Any] = tokenizer.encode(snake_case )
        A__ : List[Any] = rust_tokenizer.encode(snake_case )
        self.assertListEqual(snake_case , snake_case )
        A__ : Tuple = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        A__ : List[str] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
        self.assertListEqual(snake_case , snake_case )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        A__ : List[Any] = tokenizer.convert_ids_to_tokens(snake_case )
        A__ : Tuple = rust_tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )

    def _UpperCamelCase ( self : List[str] ):
        """Slow and fast tokenizers must agree on tokenize() and encode() for an accented sentence."""
        if not self.test_rust_tokenizer:
            return
        A__ : Any = self.get_tokenizer()
        A__ : Tuple = self.get_rust_tokenizer()
        A__ : Dict = """I was born in 92000, and this is falsé."""
        A__ : Optional[int] = tokenizer.tokenize(snake_case )
        A__ : int = rust_tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )
        A__ : List[Any] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        A__ : Tuple = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
        self.assertListEqual(snake_case , snake_case )
        A__ : int = self.get_rust_tokenizer()
        A__ : Tuple = tokenizer.encode(snake_case )
        A__ : Union[str, Any] = rust_tokenizer.encode(snake_case )
        self.assertListEqual(snake_case , snake_case )

    @slow
    def _UpperCamelCase ( self : str ):
        """Integration check of full encodings against a pinned camembert-base revision."""
        # Expected encoding for the two French sequences below.
        A__ : Union[str, Any] = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        A__ : Tuple = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=snake_case , )
| 296 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): these module constants were all renamed to `A_` by the
# obfuscation pass, so each assignment overwrites the previous one.
# Originally: DIFFUSERS_PATH = 'src/diffusers'; REPO_PATH = '.'; spec; diffusers_module.
A_ = '''src/diffusers'''
A_ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
# NOTE(review): `DIFFUSERS_PATH` and `spec` below are undefined names here
# (they were renamed to `A_` where assigned) — this currently raises NameError.
A_ = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
A_ = spec.loader.load_module()
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[Any] ) ->Any:
return line.startswith(UpperCAmelCase__ ) or len(UpperCAmelCase__ ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""", UpperCAmelCase__ ) is not None
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
    """Find and return the source text of ``object_name`` inside the diffusers tree.

    NOTE(review): mangled — the parameter should be named ``object_name`` (the
    body reads that undefined name), many ``UpperCAmelCase__`` occurrences stand
    in for distinct variables (``parts``, the diffusers root path, ``lines``),
    and the locals are bound to ``A__`` but consumed under their original names
    (``parts``, ``module``, ``lines``, ``line_index``, ``indent``,
    ``start_index``). As written this raises NameError; documented as-is.
    """
    A__ : Any = object_name.split(""".""" )
    A__ : int = 0
    # First let's find the module where our object lives.
    A__ : str = parts[i]
    while i < len(UpperCAmelCase__ ) and not os.path.isfile(os.path.join(UpperCAmelCase__, f'{module}.py' ) ):
        i += 1
        if i < len(UpperCAmelCase__ ):
            A__ : Union[str, Any] = os.path.join(UpperCAmelCase__, parts[i] )
    if i >= len(UpperCAmelCase__ ):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(UpperCAmelCase__, f'{module}.py' ), """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        A__ : List[Any] = f.readlines()
    # Now let's find the class / func in the code!
    A__ : Optional[Any] = """"""
    A__ : Any = 0
    for name in parts[i + 1 :]:
        # Scan until a `class`/`def` with the expected name at the current indent.
        while (
            line_index < len(UpperCAmelCase__ ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index] ) is None
        ):
            line_index += 1
        indent += " "
        line_index += 1
    if line_index >= len(UpperCAmelCase__ ):
        raise ValueError(f' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    A__ : List[Any] = line_index
    while line_index < len(UpperCAmelCase__ ) and _should_continue(lines[line_index], UpperCAmelCase__ ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    A__ : List[Any] = lines[start_index:line_index]
    return "".join(UpperCAmelCase__ )
# Regexes driving the copy-consistency check.
# NOTE(review): all three were originally distinct names (_re_copy_warning,
# _re_replace_pattern, _re_fill_pattern) but were renamed to `A_`, so each
# assignment overwrites the previous one; the functions below still reference
# the original names.
A_ = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')  # "# Copied from diffusers.X" marker
A_ = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')  # "old->new [all-casing]" replacement directive
A_ = re.compile(r'''<FILL\s+[^>]*>''')  # "<FILL ...>" placeholder
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Optional[Any]:
A__ : Dict = code.split("""\n""" )
A__ : List[Any] = 0
while idx < len(UpperCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase__ ):
return re.search(R"""^(\s*)\S""", lines[idx] ).groups()[0]
return ""
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->int:
    """Format a code snippet with black, preserving its original indentation.

    Indented snippets are wrapped in a dummy ``class Bla:`` so black accepts
    them, then the wrapper is stripped from the result.

    NOTE(review): mangled — the parameter should be ``code`` (read as an
    undefined name below), locals are bound to ``A__`` but consumed as
    ``has_indent``/``mode``/``result``, and ``black.TargetVersion.PYaa`` is a
    corrupted token (likely ``PY37``). ``line_length=1_1_9`` is just 119.
    Documented as-is.
    """
    A__ : str = len(get_indent(UpperCAmelCase__ ) ) > 0
    if has_indent:
        A__ : Union[str, Any] = f'class Bla:\n{code}'
    A__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=1_1_9, preview=UpperCAmelCase__ )
    A__ : Tuple = black.format_str(UpperCAmelCase__, mode=UpperCAmelCase__ )
    A__ , A__ : List[Any] = style_docstrings_in_code(UpperCAmelCase__ )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
def _lowerCAmelCase ( UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict=False ) ->List[Any]:
    """Check every '# Copied from' block in a file against its original; optionally rewrite.

    Returns a list of ``[object_name, start_index]`` for blocks that diverge.

    NOTE(review): mangled — both parameters are declared ``UpperCAmelCase__``
    (a SyntaxError; originally ``filename`` and ``overwrite``), and locals are
    bound to ``A__`` but consumed under their original names (``lines``,
    ``diffs``, ``line_index``, ``search``, ``object_name``,
    ``replace_pattern``, ``theoretical_code``, ``theoretical_indent``,
    ``start_index``, ``indent``, ``should_continue``, ``observed_code``,
    ``patterns``, ``obja``/``objb``/``option``). Documented as-is.
    """
    with open(UpperCAmelCase__, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        A__ : int = f.readlines()
    A__ : Dict = []
    A__ : List[str] = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(UpperCAmelCase__ ):
        A__ : Dict = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        A__ , A__ , A__ : Dict = search.groups()
        A__ : Tuple = find_code_in_diffusers(UpperCAmelCase__ )
        A__ : int = get_indent(UpperCAmelCase__ )
        A__ : List[str] = line_index + 1 if indent == theoretical_indent else line_index + 2
        A__ : Tuple = theoretical_indent
        A__ : Optional[Any] = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        A__ : Tuple = True
        while line_index < len(UpperCAmelCase__ ) and should_continue:
            line_index += 1
            if line_index >= len(UpperCAmelCase__ ):
                break
            A__ : Optional[int] = lines[line_index]
            A__ : Tuple = _should_continue(UpperCAmelCase__, UpperCAmelCase__ ) and re.search(f'^{indent}# End copy', UpperCAmelCase__ ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        A__ : Dict = lines[start_index:line_index]
        A__ : Tuple = """""".join(UpperCAmelCase__ )
        # Remove any nested `Copied from` comments to avoid circular copies
        A__ : Optional[int] = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase__ ) is None]
        A__ : Optional[Any] = """\n""".join(UpperCAmelCase__ )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(UpperCAmelCase__ ) > 0:
            A__ : int = replace_pattern.replace("""with""", """""" ).split(""",""" )
            A__ : List[Any] = [_re_replace_pattern.search(UpperCAmelCase__ ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                A__ , A__ , A__ : Union[str, Any] = pattern.groups()
                A__ : Union[str, Any] = re.sub(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
                if option.strip() == "all-casing":
                    A__ : List[Any] = re.sub(obja.lower(), obja.lower(), UpperCAmelCase__ )
                    A__ : Tuple = re.sub(obja.upper(), obja.upper(), UpperCAmelCase__ )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            A__ : Optional[int] = blackify(lines[start_index - 1] + theoretical_code )
            A__ : List[Any] = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                A__ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
                A__ : Tuple = start_index + 1
    if overwrite and len(UpperCAmelCase__ ) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting (unknown).' )
        with open(UpperCAmelCase__, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
            f.writelines(UpperCAmelCase__ )
    return diffs
def _lowerCAmelCase ( UpperCAmelCase__ : bool = False ) ->Any:
    """Run the copy-consistency check over every Python file under the diffusers source tree.

    Raises an Exception listing inconsistencies unless ``overwrite`` is True.

    NOTE(review): mangled — the boolean ``overwrite`` parameter is passed as
    the glob *root directory* (originally ``DIFFUSERS_PATH``, a module constant
    whose name was destroyed) and as ``recursive=`` (originally ``True``);
    locals bound to ``A__`` are consumed as ``all_files``/``diffs``/
    ``new_diffs``/``diff``; ``is_copy_consistent`` refers to the function above
    whose name was also mangled. Documented as-is.
    """
    A__ : Dict = glob.glob(os.path.join(UpperCAmelCase__, """**/*.py""" ), recursive=UpperCAmelCase__ )
    A__ : str = []
    for filename in all_files:
        A__ : Any = is_copy_consistent(UpperCAmelCase__, UpperCAmelCase__ )
        diffs += [f'- (unknown): copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(UpperCAmelCase__ ) > 0:
        A__ : Any = """\n""".join(UpperCAmelCase__ )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    # Script entry point: parse --fix_and_overwrite and run the check.
    # NOTE(review): `parser`, `args` and `check_copies` are undefined names here —
    # the obfuscation renamed the parser/args constants to `A_` and the
    # check function to `_lowerCAmelCase`.
    A_ = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    A_ = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 296 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> config URL (originally
# TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP; both constants were renamed to
# `_UpperCAmelCase`, so this assignment overwrites the logger above).
_UpperCAmelCase : str = {
    """microsoft/trocr-base-handwritten""": (
        """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowercase ( lowercase_ ):
    """Configuration for the TrOCR text decoder (a causal transformer LM).

    Reconstructed from a mangled original in which all three class attributes
    were named ``__SCREAMING_SNAKE_CASE`` (each assignment overwrote the
    previous one) and every ``__init__`` parameter was named ``snake_case``
    (a SyntaxError); the real names are recovered from the right-hand sides
    of the body's assignments, which the obfuscation left intact.
    """

    # Identifier used by the auto-config machinery.
    model_type = '''trocr'''
    # Output keys skipped when comparing inference results.
    keys_to_ignore_at_inference = ['''past_key_values''']
    # Canonical attribute names expected by the base config.
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Store decoder hyper-parameters on the instance (the original bound
        # them to a throwaway local instead of `self`).
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        # Special-token ids are handled by the base PretrainedConfig.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 285 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
class lowercase ( lowercase_ ):
    """CLAP feature extractor: turns raw audio into log-mel spectrograms, with an
    optional 4-way "fusion" representation for clips longer than ``max_length``.

    Reconstructed from a mangled original in which every ``__init__``/method
    parameter shared one name (``snake_case`` — a SyntaxError), methods were all
    defined as ``a`` while being *called* under their real names
    (``self._np_extract_fbank_features`` etc.), and locals were bound to
    ``snake_case_`` instead of ``self``/their real names.
    """

    # Names of the model inputs this extractor produces.
    model_input_names = ['input_features', 'is_longer']

    def __init__(
        self,
        feature_size=64,
        sampling_rate=4_8000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 1_4000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of FFT bins for a real-valued signal: fft_window_size // 2 + 1.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two mel filter banks: HTK-style (used for "fusion") and Slaney-style
        # (used for the plain/rand_trunc path).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale='htk',
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        )

    def to_dict(self):
        """Serialize this instance to a dict, dropping the (large, recomputable) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        """Compute a (frames, mel) dB log-mel spectrogram of `waveform` with the given filter bank."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, 'hann'),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel='dB',
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a globally-shrunk mel with three randomly-placed chunks (front/middle/back thirds)."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode='bilinear', align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        """Truncate/pad `waveform` to `max_length` and return (mel features, is_longer flag)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into ``input_features`` / ``is_longer``."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        # NOTE: the original used the corrupted token `np.floataa`; restored to
        # float64 per the upstream ClapFeatureExtractor implementation.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 285 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase: Tuple = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Optional[int] = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[str] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[Any] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Tuple = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: str = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase: List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def lowerCamelCase__ ( self ):
    """Build a slow XLNet tokenizer from the SentencePiece fixture and save it for the test mixin."""
    super().setUp()
    # We have a SentencePiece fixture for testing.
    # Fix: the original bound the tokenizer to a throwaway local (`_lowercase`)
    # and then called methods on the undefined name `tokenizer`; `UpperCAmelCase`
    # is the fixture path defined at module level.
    tokenizer = XLNetTokenizer(UpperCAmelCase, keep_accents=True)
    tokenizer.sanitize_special_tokens()
    tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase__ ( self ):
    """`<s>` maps to id 1 and back in the fixture vocabulary.

    Fix: the original assigned both values to the same throwaway local
    (`_lowercase`) and passed the undefined name `UpperCAmelCase_` instead.
    """
    token = """<s>"""
    token_id = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowerCamelCase__ ( self ):
    """First, second and last vocab entries plus total size match the fixture model.

    Fix: the original bound the key list to `_lowercase` but read the undefined
    names `vocab_keys` / `UpperCAmelCase_`.
    """
    vocab_keys = list(self.get_tokenizer().get_vocab().keys())
    self.assertEqual(vocab_keys[0], """<unk>""")
    self.assertEqual(vocab_keys[1], """<s>""")
    self.assertEqual(vocab_keys[-1], """<eod>""")
    self.assertEqual(len(vocab_keys), 10_06)
def lowerCamelCase__ ( self ):
    """The fixture tokenizer reports a vocabulary size of exactly 1000."""
    expected_vocab_size = 10_00
    self.assertEqual(self.get_tokenizer().vocab_size, expected_vocab_size)
def lowerCamelCase__ ( self ):
    """End-to-end tokenize / convert_tokens_to_ids / convert_ids_to_tokens on the fixture model.

    Fix: the original bound every intermediate to `_lowercase` and then read
    the undefined names `tokenizer` / `UpperCAmelCase_`; locals are restored
    (`tokenizer`, `tokens`, `ids`, `back_tokens`). Expected literals are
    unchanged from the original.
    """
    tokenizer = XLNetTokenizer(UpperCAmelCase, keep_accents=True)
    tokens = tokenizer.tokenize("""This is a test""")
    self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
    self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2_85, 46, 10, 1_70, 3_82])
    tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
    self.assertListEqual(
        tokens,
        [
            SPIECE_UNDERLINE + """I""",
            SPIECE_UNDERLINE + """was""",
            SPIECE_UNDERLINE + """b""",
            """or""",
            """n""",
            SPIECE_UNDERLINE + """in""",
            SPIECE_UNDERLINE + """""",
            """9""",
            """2""",
            """0""",
            """0""",
            """0""",
            """,""",
            SPIECE_UNDERLINE + """and""",
            SPIECE_UNDERLINE + """this""",
            SPIECE_UNDERLINE + """is""",
            SPIECE_UNDERLINE + """f""",
            """al""",
            """s""",
            """é""",
            """.""",
        ],
    )
    ids = tokenizer.convert_tokens_to_ids(tokens)
    self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4])
    # Out-of-vocabulary pieces ("9", "é") come back as <unk>.
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(
        back_tokens,
        [
            SPIECE_UNDERLINE + """I""",
            SPIECE_UNDERLINE + """was""",
            SPIECE_UNDERLINE + """b""",
            """or""",
            """n""",
            SPIECE_UNDERLINE + """in""",
            SPIECE_UNDERLINE + """""",
            """<unk>""",
            """2""",
            """0""",
            """0""",
            """0""",
            """,""",
            SPIECE_UNDERLINE + """and""",
            SPIECE_UNDERLINE + """this""",
            SPIECE_UNDERLINE + """is""",
            SPIECE_UNDERLINE + """f""",
            """al""",
            """s""",
            """<unk>""",
            """.""",
        ],
    )
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number):
    """Return True iff ``number`` is prime (trial division up to sqrt(number))."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n):
    """Sieve of Eratosthenes: return all primes from 2 up to ``n`` (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n):
    """Return all primes from 2 up to ``n`` (inclusive) via ``is_prime``."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number):
    """Return the list of prime factors of ``number`` (0 and 1 map to [number])."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # NOTE(review): true division keeps the upstream behavior;
                # quotient becomes a float after the first division.
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number):
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number):
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number):
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number):
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number):
    """Return two primes whose sum is the even ``number`` (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number_a, number_a_2):
    """Return the greatest common divisor of two non-negative ints (Euclid)."""
    assert (
        isinstance(number_a, int)
        and isinstance(number_a_2, int)
        and (number_a >= 0)
        and (number_a_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_a_2 != 0:
        rest = number_a % number_a_2
        number_a = number_a_2
        number_a_2 = rest
    # precondition
    assert isinstance(number_a, int) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def kg_v(number_a, number_a_2):
    """Return the least common multiple of two positive ints via prime factorizations."""
    assert (
        isinstance(number_a, int)
        and isinstance(number_a_2, int)
        and (number_a >= 1)
        and (number_a_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_a > 1 and number_a_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a)
        prime_fac_b = prime_factorization(number_a_2)
    elif number_a == 1 or number_a_2 == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a, number_a_2)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n):
    """Return the n-th prime number (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_a, p_number_a_2):
    """Return all primes strictly between two given primes (exclusive bounds)."""
    assert (
        is_prime(p_number_a) and is_prime(p_number_a_2) and (p_number_a < p_number_a_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_a_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_a_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """Return all divisors of ``n`` (including 1 and n), in ascending order."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number):
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """Return (numerator, denominator) reduced by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Return n! for n >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n):
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_a = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_a
        fib_a = tmp
    return ans
| 54 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence, start=None, end=None):
    """Sort ``sequence[start..end]`` (inclusive) in place using slowsort.

    Defaults cover the whole list. Deliberately inefficient (multiply-and-
    surrender algorithm); kept for educational purposes.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # place the maximum of the two halves at the end ...
    if sequence[end] < sequence[mid]:
        sequence[mid], sequence[end] = sequence[end], sequence[mid]
    # ... then sort everything before it.
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 54 | 1 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _SCREAMING_SNAKE_CASE:
    """Constant histogram stretching of a grayscale image.

    ``stretch`` builds a cumulative-histogram intensity remapping table and
    applies it pixel-by-pixel, saving the result to ``output_data/output.jpg``.
    """

    def __init__(self):
        self.img = ""  # stretched image (set by stretch())
        self.original_image = ""  # untouched copy of the loaded image
        self.last_list = []  # per-intensity remapping table
        self.rem = 0  # most recent remapped intensity
        self.L = 256  # number of gray levels
        self.sk = 0  # cumulative histogram probability
        self.k = 0  # total pixel count
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Load ``input_image`` as grayscale and stretch its histogram in place."""
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 for nonzero `last`;
                # preserved from the original algorithm -- confirm intent.
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('''output_data/output.jpg''', self.img)

    def plot_histogram(self):
        """Plot the histogram of the (stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display the output and input images for 5 seconds."""
        cva.imshow('''Output-Image''', self.img)
        cva.imshow('''Input-Image''', self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # dirname (not basename) gives the directory that contains this script,
    # so the sample image path resolves correctly.
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = _SCREAMING_SNAKE_CASE()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def UpperCamelCase(model_name) -> MaskFormerConfig:
    """Build a MaskFormerConfig (Swin-tiny backbone) for the given checkpoint name.

    The number of labels and the id2label file are chosen from ``model_name``.
    """
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # keys arrive as strings in the JSON file; convert them to ints.
    # NOTE(review): the mapping is computed but not attached to the config,
    # mirroring the upstream conversion script -- confirm whether
    # config.id2label should be set here.
    idalabel = {int(k): v for k, v in idalabel.items()}
    return config
def UpperCamelCase(config) -> list:
    """Build (old_key, new_key) rename pairs mapping original MaskFormer
    checkpoint names to the HF implementation's parameter names."""
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
    rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
    rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
    rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
        if i < 3:
            rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
        rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
        rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
    # FPN
    rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
    rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
    rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
    for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
    rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
    rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers ):
        # self-attention out projection
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
        # cross-attention out projection
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
        # MLP 1
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
        # MLP 2
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
        # layernorm 3 (final layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
    rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
    rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
    # heads on top
    rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
    rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
    rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
    rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
    rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
    for i in range(3 ):
        rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
    # fmt: on
    return rename_keys
def UpperCamelCase(dct, old, new) -> None:
    """Rename key ``old`` to ``new`` in ``dct`` in place."""
    val = dct.pop(old)
    dct[new] = val
def UpperCamelCase(state_dict, backbone_config) -> None:
    """Split fused Swin qkv tensors into separate query/key/value entries.

    Each block's fused ``attn.qkv`` weight/bias is popped and re-added under
    the HF ``attention.self.{query,key,value}`` names. Modifies ``state_dict``
    in place.
    """
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def UpperCamelCase ( a , a ) -> int:
    '''
    Split each decoder layer's fused self-attention and cross-attention
    in_proj matrices into separate query/key/value tensors in the state dict.

    NOTE(review): machine-mangled like the function above — the parameters
    (both `a`, a SyntaxError) appear to be (state_dict, config) from the call
    site, and the assignment targets (original state-dict key names) were
    collapsed to `__magic_name__` and are unrecoverable from this file alone.
    '''
    # fmt: off
    __magic_name__ = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        __magic_name__ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        __magic_name__ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        __magic_name__ = in_proj_weight[: hidden_size, :]
        __magic_name__ = in_proj_bias[:config.hidden_size]
        __magic_name__ = in_proj_weight[hidden_size : hidden_size * 2, :]
        __magic_name__ = in_proj_bias[hidden_size : hidden_size * 2]
        __magic_name__ = in_proj_weight[-hidden_size :, :]
        __magic_name__ = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        __magic_name__ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        __magic_name__ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        __magic_name__ = in_proj_weight[: hidden_size, :]
        __magic_name__ = in_proj_bias[:config.hidden_size]
        __magic_name__ = in_proj_weight[hidden_size : hidden_size * 2, :]
        __magic_name__ = in_proj_bias[hidden_size : hidden_size * 2]
        __magic_name__ = in_proj_weight[-hidden_size :, :]
        __magic_name__ = in_proj_bias[-hidden_size :]
    # fmt: on
def UpperCamelCase ( ) -> torch.Tensor:
    """Download and return the standard COCO cats image used to sanity-check
    converted vision models.

    Fixes the original body, which referenced an undefined name `a` for both
    the URL and the `stream` flag; `stream=True` keeps the HTTP payload as a
    file-like object so PIL can read `.raw` directly.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCamelCase ( a , a , a , a = False ) -> Dict:
    '''
    Convert an original MaskFormer pickle checkpoint into the HF
    MaskFormerForInstanceSegmentation format, verify the logits on a test
    image, and optionally save and/or push the converted model.

    NOTE(review): machine-mangled — the four parameters (all renamed to `a`,
    a SyntaxError) are (model_name, checkpoint_path, pytorch_dump_folder_path,
    push_to_hub) judging from the argparse call in the `__main__` block, and
    all assignment targets were collapsed to `__magic_name__` while later
    reads still use the original names (config, data, state_dict, model, ...).
    '''
    __magic_name__ = get_maskformer_config(a )
    # load original state_dict
    with open(a , '''rb''' ) as f:
        __magic_name__ = pickle.load(a )
    __magic_name__ = data['''model''']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    __magic_name__ = create_rename_keys(a )
    for src, dest in rename_keys:
        rename_key(a , a , a )
    read_in_swin_q_k_v(a , config.backbone_config )
    read_in_decoder_q_k_v(a , a )
    # update to torch tensors
    for key, value in state_dict.items():
        __magic_name__ = torch.from_numpy(a )
    # load 🤗 model
    __magic_name__ = MaskFormerForInstanceSegmentation(a )
    model.eval()
    for name, param in model.named_parameters():
        print(a , param.shape )
    __magic_name__ , __magic_name__ = model.load_state_dict(a , strict=a )
    # only the final backbone layernorm is expected to be missing
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(a ) == 0, F'''Unexpected keys: {unexpected_keys}'''
    # verify results
    __magic_name__ = prepare_img()
    # dataset-specific ignore index for the image processor
    if "vistas" in model_name:
        __magic_name__ = 65
    elif "cityscapes" in model_name:
        __magic_name__ = 6_5535
    else:
        __magic_name__ = 255
    __magic_name__ = True if '''ade''' in model_name else False
    __magic_name__ = MaskFormerImageProcessor(ignore_index=a , reduce_labels=a )
    __magic_name__ = image_processor(a , return_tensors='''pt''' )
    __magic_name__ = model(**a )
    print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
    # hard-coded expected slice for the tiny ADE checkpoint
    if model_name == "maskformer-swin-tiny-ade":
        __magic_name__ = torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(a ).mkdir(exist_ok=a )
        model.save_pretrained(a )
        image_processor.save_pretrained(a )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(F'''nielsr/{model_name}''' )
        image_processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the MaskFormer conversion.
    # Fixes in this block: the parser/args objects were assigned to a mangled
    # name (`_lowerCAmelCase`) while being used as `parser`/`args` (NameError);
    # `--model_name`'s help was a one-element tuple instead of a string; and
    # the final call targeted an undefined name — the conversion entry point
    # defined above is `UpperCamelCase`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    UpperCamelCase(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 98 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCamelCase ( __A = 100 ) -> int:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = 2
for i in range(2 , max_n + 1 ):
UpperCamelCase__ = pre_numerator
UpperCamelCase__ = 2 * i // 3 if i % 3 == 0 else 1
UpperCamelCase__ = cur_numerator
UpperCamelCase__ = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
    # The solver defined above is `_UpperCamelCase`; the original referenced a
    # nonexistent `solution`, which raised NameError at runtime.
    print(f"""{_UpperCamelCase() = }""")
| 80 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a :
    """Test helper that builds Swinv2 configs, dummy inputs, and per-head
    model checks for the Swinv2 model tests below (a `SwinvaModelTester`)."""
    def __init__( self : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple=13 , _SCREAMING_SNAKE_CASE : Tuple=32 , _SCREAMING_SNAKE_CASE : Dict=2 , _SCREAMING_SNAKE_CASE : List[Any]=3 , _SCREAMING_SNAKE_CASE : str=16 , _SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , _SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , _SCREAMING_SNAKE_CASE : str=2 , _SCREAMING_SNAKE_CASE : Optional[int]=2.0 , _SCREAMING_SNAKE_CASE : Tuple=True , _SCREAMING_SNAKE_CASE : Dict=0.0 , _SCREAMING_SNAKE_CASE : str=0.0 , _SCREAMING_SNAKE_CASE : List[str]=0.1 , _SCREAMING_SNAKE_CASE : Tuple="gelu" , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : List[Any]=0.02 , _SCREAMING_SNAKE_CASE : Any=1E-5 , _SCREAMING_SNAKE_CASE : Tuple=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Any=10 , _SCREAMING_SNAKE_CASE : Union[str, Any]=8 , )-> Dict:
        # NOTE(review): positional parameters were all mangled to
        # `_SCREAMING_SNAKE_CASE`; the attribute assignments below show the
        # intended order (parent, batch_size, image_size, patch_size, ...).
        lowerCAmelCase__ : Optional[Any] = parent
        lowerCAmelCase__ : Optional[int] = batch_size
        lowerCAmelCase__ : Tuple = image_size
        lowerCAmelCase__ : Optional[Any] = patch_size
        lowerCAmelCase__ : Dict = num_channels
        lowerCAmelCase__ : Dict = embed_dim
        lowerCAmelCase__ : Optional[Any] = depths
        lowerCAmelCase__ : Tuple = num_heads
        lowerCAmelCase__ : Dict = window_size
        lowerCAmelCase__ : List[str] = mlp_ratio
        lowerCAmelCase__ : str = qkv_bias
        lowerCAmelCase__ : List[Any] = hidden_dropout_prob
        lowerCAmelCase__ : int = attention_probs_dropout_prob
        lowerCAmelCase__ : Tuple = drop_path_rate
        lowerCAmelCase__ : Dict = hidden_act
        lowerCAmelCase__ : Tuple = use_absolute_embeddings
        lowerCAmelCase__ : int = patch_norm
        lowerCAmelCase__ : Optional[int] = layer_norm_eps
        lowerCAmelCase__ : Optional[int] = initializer_range
        lowerCAmelCase__ : Dict = is_training
        lowerCAmelCase__ : Any = scope
        lowerCAmelCase__ : int = use_labels
        lowerCAmelCase__ : Tuple = type_sequence_label_size
        lowerCAmelCase__ : Any = encoder_stride
    def UpperCAmelCase__( self : str )-> Optional[int]:
        # Build random pixel values (and labels when requested) plus a config.
        lowerCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase__ : Dict = None
        if self.use_labels:
            lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCAmelCase__ : Optional[Any] = self.get_config()
        return config, pixel_values, labels
    def UpperCAmelCase__( self : Optional[int] )-> str:
        # NOTE(review): `path_norm=` below looks like a typo for `patch_norm=`
        # (also present upstream) — confirm whether SwinvaConfig accepts it.
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any )-> int:
        # Base model: check the (batch, seq_len, dim) shape of the last hidden state.
        lowerCAmelCase__ : Union[str, Any] = SwinvaModel(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase__ : List[str] = model(_SCREAMING_SNAKE_CASE )
        # sequence length shrinks 4x per merge stage; dim doubles per stage
        lowerCAmelCase__ : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCAmelCase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any )-> List[Any]:
        # Masked-image-modeling head: logits reconstruct the input resolution.
        lowerCAmelCase__ : Optional[int] = SwinvaForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase__ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        lowerCAmelCase__ : Any = 1
        lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCAmelCase__ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] )-> Union[str, Any]:
        # Classification head: logits have one column per label.
        lowerCAmelCase__ : Tuple = self.type_sequence_label_size
        lowerCAmelCase__ : Optional[Any] = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase__ : Any = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def UpperCAmelCase__( self : Tuple )-> str:
        # Repackage prepare_config_and_inputs() into the (config, inputs_dict)
        # shape expected by the common test mixin.
        lowerCAmelCase__ : int = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = config_and_inputs
        lowerCAmelCase__ : Tuple = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _a ( _lowercase , _lowercase , unittest.TestCase):
    """Model test suite for Swinv2, driven by the tester class above via the
    common ModelTesterMixin / PipelineTesterMixin machinery."""
    _a : str = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    _a : Tuple = (
        {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    _a : List[str] = False
    _a : int = False
    _a : Optional[int] = False
    _a : Optional[Any] = False
    def UpperCAmelCase__( self : str )-> Optional[Any]:
        # setUp: instantiate the tester and config tester used by every test.
        lowerCAmelCase__ : Tuple = SwinvaModelTester(self )
        lowerCAmelCase__ : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
    def UpperCAmelCase__( self : str )-> int:
        # Exercise the standard config serialization round-trips.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def UpperCAmelCase__( self : Optional[int] )-> Optional[Any]:
        lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
    @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
    def UpperCAmelCase__( self : Optional[Any] )-> Dict:
        pass
    @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
    def UpperCAmelCase__( self : Tuple )-> Optional[int]:
        pass
    def UpperCAmelCase__( self : List[Any] )-> List[str]:
        # Input embeddings are a module; output embeddings are Linear or absent.
        lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
    def UpperCAmelCase__( self : Any )-> Dict:
        # forward() must take `pixel_values` as its first argument.
        lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Dict = model_class(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCAmelCase__ : int = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__( self : Union[str, Any] )-> Dict:
        # Attention outputs: one tensor per stage, each sized by the window.
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Tuple = True
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : List[str] = True
            lowerCAmelCase__ : Union[str, Any] = False
            lowerCAmelCase__ : Optional[Any] = True
            lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : str = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            lowerCAmelCase__ : List[str] = outputs.attentions
            lowerCAmelCase__ : Union[str, Any] = len(self.model_tester.depths )
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCAmelCase__ : int = True
            lowerCAmelCase__ : Dict = config.window_size**2
            lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            lowerCAmelCase__ : str = outputs.attentions
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            lowerCAmelCase__ : int = len(_SCREAMING_SNAKE_CASE )
            # Check attention is always last and order is fine
            lowerCAmelCase__ : str = True
            lowerCAmelCase__ : List[str] = True
            lowerCAmelCase__ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            if hasattr(self.model_tester , '''num_hidden_states_types''' ):
                lowerCAmelCase__ : List[Any] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                lowerCAmelCase__ : str = 2
            self.assertEqual(out_len + added_hidden_states , len(_SCREAMING_SNAKE_CASE ) )
            lowerCAmelCase__ : List[Any] = outputs.attentions
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] )-> Tuple:
        # Shared checker: hidden_states count/shape and the reshaped variant.
        lowerCAmelCase__ : Any = model_class(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        with torch.no_grad():
            lowerCAmelCase__ : Any = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase__ : str = outputs.hidden_states
        lowerCAmelCase__ : Optional[int] = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
        # Swinv2 has a different seq_length
        lowerCAmelCase__ : List[Any] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        lowerCAmelCase__ : Dict = outputs.reshaped_hidden_states
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = reshaped_hidden_states[0].shape
        lowerCAmelCase__ : Tuple = (
            reshaped_hidden_states[0].view(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def UpperCAmelCase__( self : Tuple )-> List[Any]:
        # hidden_states at the tester's native image size.
        lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : List[str] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Any = True
            self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : Any = True
            self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__( self : Any )-> Tuple:
        # hidden_states with an image size padded up to a patch multiple.
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Optional[int] = 3
        lowerCAmelCase__ : List[str] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCAmelCase__ : List[str] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCAmelCase__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Optional[Any] = True
            self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : Tuple = True
            self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
    def UpperCAmelCase__( self : Dict )-> Optional[Any]:
        lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__( self : str )-> Optional[Any]:
        lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
    @slow
    def UpperCAmelCase__( self : Optional[Any] )-> int:
        # Smoke-test loading the first published checkpoint.
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ : Optional[Any] = SwinvaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
    def UpperCAmelCase__( self : Dict )-> List[str]:
        # With zeroed initializer ranges, all trainable params must be 0 or 1
        # (embeddings and logit_scale are exempt).
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Dict = _config_zero_init(_SCREAMING_SNAKE_CASE )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : List[str] = model_class(config=_SCREAMING_SNAKE_CASE )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class _a ( unittest.TestCase):
    """Integration test: run the released tiny Swinv2 classifier on a fixture
    image and compare a logits slice against hard-coded expected values."""
    @cached_property
    def UpperCAmelCase__( self : Tuple )-> Optional[Any]:
        # Image processor for the checkpoint, or None when vision deps are absent.
        return (
            AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
            if is_vision_available()
            else None
        )
    @slow
    def UpperCAmelCase__( self : List[Any] )-> List[str]:
        lowerCAmelCase__ : Any = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
            _SCREAMING_SNAKE_CASE )
        lowerCAmelCase__ : Optional[Any] = self.default_image_processor
        lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCAmelCase__ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
        # verify the logits
        lowerCAmelCase__ : Any = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase__ : List[Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 131 | 0 |
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase = 10**12 ) -> int:
    """Project Euler 100: number of blue discs in the first arrangement whose
    total number of discs exceeds ``_UpperCAmelCase``, such that drawing two
    discs without replacement gives exactly P(blue, blue) = 1/2.

    Solutions of the underlying Pell-like equation are generated by the
    recurrences below; the blue count is recovered as (denominator + 1) // 2.

    Fixes the original body, which assigned all four state variables to one
    mangled name (`lowercase`) while the loop read the real names.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    # numerator tracks 2*total - 1, so loop until total exceeds the minimum
    while numerator <= 2 * _UpperCAmelCase - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
    # The solver above is named `lowercase__`; the original referenced a
    # nonexistent `solution`, which raised NameError at runtime.
    print(f'''{lowercase__() = }''')
| 53 |
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase ) -> list:
    """Sort ``_UpperCAmelCase`` in place with binary insertion sort and return it.

    For each element, binary-search the sorted prefix for the insertion point,
    shift the larger elements right, and drop the element in. Handles empty
    and single-element lists trivially (the loop body never runs).

    Fixes the original body, which assigned every local to one mangled name
    (`lowercase`) while reading the real names (`val`, `low`, `high`, ...).
    """
    length = len(_UpperCAmelCase)
    for i in range(1, length):
        val = _UpperCAmelCase[i]
        low = 0
        high = i - 1
        # binary search for the leftmost index > all elements <= val
        while low <= high:
            mid = (low + high) // 2
            if val < _UpperCAmelCase[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift [low, i) one slot right, then insert
        for j in range(i, low, -1):
            _UpperCAmelCase[j] = _UpperCAmelCase[j - 1]
        _UpperCAmelCase[low] = val
    return _UpperCAmelCase
if __name__ == "__main__":
    # Fixes the original block: the input/parsed values were assigned to a
    # mangled name (`_UpperCamelCase`, with a bogus annotation) while being
    # read as `user_input`/`unsorted`, and the sort was called by a
    # nonexistent name — the function defined above is `lowercase__`.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(lowercase__(unsorted))
| 53 | 1 |
import re
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Tuple = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(_A , _A ):
return match.string == phone
return False
if __name__ == "__main__":
    # The validator above is named `_lowercase`; the original referenced a
    # nonexistent `indian_phone_validator`, which raised NameError.
    print(_lowercase("+918827897895"))
| 275 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
# Emit informative progress messages while the conversion script runs.
logging.set_verbosity_info()
# Module-level logger for this conversion script.
__A : Dict = logging.get_logger(__name__)
def __UpperCamelCase ( _A ):
    """Build a MobileNetV1 config for the checkpoint named ``_A``
    (e.g. ``mobilenet_v1_1.0_224``), deriving depth multiplier and image size
    from the name and attaching the 1001-class ImageNet label mapping.

    Fixes the original body, which assigned every local to one mangled name
    (`lowerCamelCase_`) while reading the real names (`config`, `matches`,
    `idalabel`), used ``int(_A) + 1`` where the dict key ``k`` was intended,
    and carried annotations referencing unimported typing names.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in _A:
        raise ValueError("""Quantized models are not supported.""")
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", _A)
    if matches:
        # e.g. "1.0" -> depth multiplier, "224" -> input resolution
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    # shift every ImageNet id by one to make room for the background class
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    # NOTE(review): attribute names follow this file's mangled convention;
    # upstream these are `config.id2label` / `config.label2id` — confirm.
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def __UpperCamelCase ( ):
    """Download and return the standard COCO cats image used to sanity-check
    the converted model.

    Fixes the original body, which passed an undefined name `_A` as both the
    URL and the `stream` flag; `stream=True` keeps the HTTP payload as a
    file-like object so PIL can read `.raw`.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __UpperCamelCase ( _A : List[Any] , _A : Any , _A : str , _A : int=False ) ->List[str]:
    """
    Convert an original TensorFlow MobileNetV1 checkpoint into the HF
    MobileNetVaForImageClassification format, verify the logits, and
    optionally save and/or push the result.

    NOTE(review): machine-mangled — the four parameters (all `_A`, which is a
    SyntaxError) are (model_name, checkpoint_path, pytorch_dump_folder_path,
    push_to_hub) judging from the argparse block below, and assignment targets
    were collapsed to `lowerCamelCase_` while later reads use the real names.
    """
    lowerCamelCase_ =get_mobilenet_va_config(_A )
    # Load 🤗 model
    lowerCamelCase_ =MobileNetVaForImageClassification(_A ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(_A , _A , _A )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    lowerCamelCase_ =MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    lowerCamelCase_ =image_processor(images=prepare_img() , return_tensors="""pt""" )
    lowerCamelCase_ =model(**_A )
    lowerCamelCase_ =outputs.logits
    # 1001 classes: 1000 ImageNet labels plus background at index 0
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        lowerCamelCase_ =torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
    elif model_name == "mobilenet_v1_0.75_192":
        lowerCamelCase_ =torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
    else:
        lowerCamelCase_ =None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , _A , atol=1E-4 )
    Path(_A ).mkdir(exist_ok=_A )
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(_A )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(_A )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        lowerCamelCase_ ="""google/""" + model_name
        image_processor.push_to_hub(_A )
        model.push_to_hub(_A )
if __name__ == "__main__":
    # CLI entry point for the MobileNetV1 conversion.
    # Fixes in this block: the parser/args objects were assigned to a mangled
    # name (`__A`) while being used as `parser`/`args` (NameError), and the
    # final call targeted a nonexistent `convert_movilevit_checkpoint` — the
    # conversion entry point defined above is `__UpperCamelCase`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='mobilenet_v1_1.0_224',
        type=str,
        help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
    )
    parser.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    __UpperCamelCase(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 154 | 0 |
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase_ ( _lowercase ):
    """Zero-shot text-classification agent tool backed by the BART-large MNLI
    checkpoint: given a text and candidate labels, it scores each label via
    entailment and returns the most likely one.

    NOTE(review): machine-mangled — the class attributes below correspond to
    the PipelineTool fields (default_checkpoint, description, name,
    pre_processor_class, model_class, inputs, outputs), and the three methods
    are setup/encode/decode. The duplicate `lowerCAmelCase_` parameter names
    in `encode` are a SyntaxError inherited from the mangling.
    """
    lowerCAmelCase_ = '''facebook/bart-large-mnli'''
    lowerCAmelCase_ = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    lowerCAmelCase_ = '''text_classifier'''
    lowerCAmelCase_ = AutoTokenizer
    lowerCAmelCase_ = AutoModelForSequenceClassification
    lowerCAmelCase_ = ['''text''', ['''text''']]
    lowerCAmelCase_ = ['''text''']
    def lowerCAmelCase ( self ) -> Any:
        # setup: locate the "entailment" class id in the model config.
        super().setup()
        _snake_case = self.model.config
        _snake_case = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('entail' ):
                # mangled: the intended value here is int(idx)
                _snake_case = int(__UpperCamelCase )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
        # encode(text, labels): pair the text with one hypothesis per label.
        _snake_case = labels
        return self.pre_processor(
            [text] * len(__UpperCamelCase ) , [F'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
        # decode(outputs): pick the label whose entailment logit is largest.
        _snake_case = outputs.logits
        _snake_case = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
| 360 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
    """Return modes for the text-generation pipeline: raw token tensors, only
    the newly generated text, or prompt + generated text.

    Fixes the original class body, which bound all three values to the same
    member name — `enum.Enum` raises ``TypeError: Attempted to reuse key`` on
    duplicate member names, so the class could not even be created. The member
    names match how the pipeline code below references them
    (``ReturnType.TENSORS`` / ``NEW_TEXT`` / ``FULL_TEXT``).
    """
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
    """Text-generation pipeline for causal language models.

    Encodes a prompt (optionally with a model-specific prefix), calls
    ``self.model.generate`` and decodes the generated ids back to text.
    Fixes applied: restored the real method names (``_sanitize_parameters``,
    ``preprocess``, ``_forward``, ``postprocess``) that ``__init__`` and the
    pipeline framework call, restored distinct parameter names (the block
    previously declared duplicate parameters, a SyntaxError), and rebound
    results to the variable names the following lines actually read.
    """

    # Article prepended to every prompt for XLNet / Transfo-XL models, which
    # need some running context ("state") before the prompt to generate well.
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default". It also defines both some preprocess_kwargs and
            # generate_kwargs, which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(
                    prefix=prefix, **self._forward_params
                )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split user kwargs into (preprocess, forward, postprocess) dicts."""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            # The prefix lengthens the input; remember by how much so
            # `_forward` can stretch max/min length accordingly.
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    ' [None, \'hole\']' )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.' )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        """Parse arguments, adding Transfo-XL's space-before-punctuation quirk."""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True} )
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """Generate completions for `text_inputs` (a prompt or list of prompts)."""
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        """Tokenize prefix+prompt; with handle_long_generation="hole", trim the
        left side so the prompt plus the requested new tokens fit the window."""
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['max_new_tokens']
            else:
                new_tokens = generate_kwargs.get('max_length', self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError('We cannot infer how many new tokens are expected' )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
                        ' models max length' )
                # Keep only the rightmost tokens that still fit.
                inputs["input_ids"] = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs['attention_mask'][:, -keep_length:]
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Run `generate` and reshape output to (batch, num_return_sequences, seq)."""
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask', None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text' )

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length', 0 )
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get('max_length' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        """Decode generated ids into {"generated_text": ...} records, or
        {"generated_token_ids": ...} when return_type is TENSORS."""
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record )
        return records
| 295 | 0 |
'''simple docstring'''
def _lowercase ( __A ):
    """Return True if ``__A`` is an automorphic number, else False.

    An automorphic number is one whose square ends in the number itself
    (e.g. 5 -> 25, 76 -> 5776).  Negative numbers are never automorphic.

    Raises:
        TypeError: if ``__A`` is not an integer.  (The original called
        ``isinstance(__A, __A)`` — the second argument must be a type —
        and raised the raw input instead of the prepared message.)
    """
    if not isinstance(__A, int):
        msg = f"Input value of [number={__A}] must be an integer"
        raise TypeError(msg)
    if __A < 0:
        return False
    number = __A
    number_square = number * number
    # Compare the trailing digits of the number and of its square, one
    # decimal place at a time, until the number is exhausted.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 349 |
'''simple docstring'''
import pytest

# Module name the dummy `datasets` loading script will be saved under.
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'

# Source code of a minimal `datasets` loading script; written to a temp file
# by the `dataset_loading_script_dir` fixture below.
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    """Name of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    """Source code of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(tmp_path, dataset_loading_script_name, dataset_loading_script_code):
    """Write the dummy loading script to <tmp_path>/datasets/<name>/<name>.py
    and return the script path as a string."""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, """w""") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 349 | 1 |
# Hexadecimal digit for each value 0-15; the conversion loop below looks
# digits up by value.  (The loop reads `values`; the table was previously
# bound only to `_A`, leaving `values` undefined.)
values = {
    0: '0',
    1: '1',
    2: '2',
    3: '3',
    4: '4',
    5: '5',
    6: '6',
    7: '7',
    8: '8',
    9: '9',
    10: 'a',
    11: 'b',
    12: 'c',
    13: 'd',
    14: 'e',
    15: 'f',
}

# Backward-compatible alias: this table was previously bound to `_A`.
_A = values


def lowerCamelCase__ ( a__ : float ) -> str:
    """Convert a whole-valued number to its hexadecimal string.

    Mirrors the built-in ``hex`` for integers: "0x..." for non-negative
    values, "-0x..." for negatives, and "0x0" for zero.

    Args:
        a__: an int, or a float carrying an integral value (e.g. 37.0).

    Returns:
        The hexadecimal representation as a string.
    """
    # Accept floats only when they have no fractional part.
    assert type(a__) in (int, float) and a__ == int(a__)
    decimal = int(a__)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    # Edge case: the loop never runs for 0; emit "0" explicitly so the
    # result is a valid literal ("0x0"), matching hex(0).
    if not hexadecimal:
        hexadecimal = "0"
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 261 |
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one dataset row's `content` and record its chars-per-token
    ratio.  Must be named `tokenize`: `ds.map(tokenize, ...)` below calls it.

    NOTE(review): reads the module-level `tokenizer` built after argument
    parsing — this function is only usable from this script's main flow.
    """
    output = {}
    output["input_ids"] = tokenizer(example["""content"""], truncation=False)["""input_ids"""]
    output["ratio_char_token"] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one map worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        '''repo_name''',
        '''path''',
        '''copies''',
        '''size''',
        '''content''',
        '''license''',
        '''hash''',
        '''line_mean''',
        '''line_max''',
        '''alpha_frac''',
        '''autogenerated''',
    ],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 261 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.