code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCAmelCase_ ( ) -> Any: raise RuntimeError('''CUDA out of memory.''' ) class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Tuple ): super().__init__() __lowercase : Dict = nn.Linear(3 , 4 ) __lowercase : Union[str, Any] = nn.BatchNormad(4 ) __lowercase : List[Any] = nn.Linear(4 , 5 ) def snake_case_ ( self : str , _snake_case : List[str] ): return self.lineara(self.batchnorm(self.lineara(_snake_case ) ) ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self : Union[str, Any] ): __lowercase : Optional[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(_snake_case : str ): nonlocal batch_sizes batch_sizes.append(_snake_case ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_snake_case , [128, 64, 32, 16, 8] ) def snake_case_ ( self : List[str] ): __lowercase : Dict = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(_snake_case : Union[str, Any] , _snake_case : Optional[int] ): nonlocal batch_sizes batch_sizes.append(_snake_case ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __lowercase , __lowercase : Any = mock_training_loop_function('''hello''' ) self.assertListEqual(_snake_case , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def snake_case_ ( self : List[str] ): @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(_snake_case : Union[str, Any] ): pass with self.assertRaises(_snake_case ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case_ ( self : List[Any] ): 
@find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_snake_case : List[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_snake_case ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case_ ( self : Optional[Any] ): @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(_snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Tuple ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_snake_case ) as cm: mock_training_loop_function(128 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def snake_case_ ( self : int ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_snake_case : int ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(_snake_case ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def snake_case_ ( self : Union[str, Any] ): __lowercase : Any = torch.cuda.memory_allocated() __lowercase : Optional[int] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _snake_case ) __lowercase : Union[str, Any] = release_memory(_snake_case ) self.assertEqual(torch.cuda.memory_allocated() , _snake_case )
156
from math import isqrt, loga def UpperCAmelCase_ ( __lowerCAmelCase ) -> list[int]: __lowercase : Optional[Any] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __lowerCAmelCase , __lowerCAmelCase ): __lowercase : Dict = False return [i for i in range(2 , __lowerCAmelCase ) if is_prime[i]] def UpperCAmelCase_ ( __lowerCAmelCase = 800_800 , __lowerCAmelCase = 800_800 ) -> int: __lowercase : Tuple = degree * loga(__lowerCAmelCase ) __lowercase : List[str] = int(__lowerCAmelCase ) __lowercase : Optional[Any] = calculate_prime_numbers(__lowerCAmelCase ) __lowercase : Any = 0 __lowercase : int = 0 __lowercase : Tuple = len(__lowerCAmelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F'{solution() = }')
156
1
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): lowercase__ : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right lowercase__ : Optional[Any] = 1_2_8_0_2_2 lowercase__ : int = 1_2_8_0_2_8 @require_sentencepiece class UpperCamelCase__ ( lowercase_, unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = MaMaaaTokenizer _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = True def SCREAMING_SNAKE_CASE__ ( self : Tuple ): super().setUp() lowerCAmelCase_ : Any = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>'] lowerCAmelCase_ : Dict = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) lowerCAmelCase_ : Tuple = Path(self.tmpdirname ) save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['spm_file'] ) lowerCAmelCase_ : Dict = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **SCREAMING_SNAKE_CASE_ : str ): return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ): return ( "This is a test", "This is 
a test", ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : Tuple = '</s>' lowerCAmelCase_ : Dict = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowerCAmelCase_ : Dict = self.get_tokenizer() lowerCAmelCase_ : Optional[Any] = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '</s>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '<s>' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('Skip this test while all models are still to be uploaded.' ) def SCREAMING_SNAKE_CASE__ ( self : str ): pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : Any = self.get_tokenizer() lowerCAmelCase_ : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [2, 3, 4, 5, 6] , ) lowerCAmelCase_ : int = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) lowerCAmelCase_ : List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , 'This is a test' ) @slow def SCREAMING_SNAKE_CASE__ ( self : Tuple ): # fmt: off lowerCAmelCase_ : str = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 
5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = """facebook/m2m100_418M""" _SCREAMING_SNAKE_CASE = [ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] _SCREAMING_SNAKE_CASE = [ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off _SCREAMING_SNAKE_CASE = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2] @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ): lowerCAmelCase_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en' , tgt_lang='fr' ) lowerCAmelCase_ : Any = 1 return cls def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 1_2_8_0_0_6 ) self.assertEqual(self.tokenizer.get_lang_id('en' ) , 1_2_8_0_2_2 ) self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 1_2_8_0_7_6 ) self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 1_2_8_0_6_3 ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
lowerCAmelCase_ : str = self.tokenizer.get_vocab() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['<unk>'] , 3 ) self.assertIn(self.tokenizer.get_lang_token('en' ) , SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : int ): lowerCAmelCase_ : int = 'en' lowerCAmelCase_ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids ) # fmt: off lowerCAmelCase_ : str = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2] # fmt: on lowerCAmelCase_ : Any = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : str = tempfile.mkdtemp() lowerCAmelCase_ : Optional[Any] = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertDictEqual(new_tok.lang_token_to_id , SCREAMING_SNAKE_CASE_ ) @require_torch def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowerCAmelCase_ : List[str] = 'en' lowerCAmelCase_ : Optional[Any] = 'fr' lowerCAmelCase_ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) lowerCAmelCase_ : Tuple = shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowerCAmelCase_ : Dict = batch[k].tolist() # batch = {k: 
v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowerCAmelCase_ : Union[str, Any] = 'mr' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowerCAmelCase_ : Tuple = 'zh' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def SCREAMING_SNAKE_CASE__ ( self : Any ): lowerCAmelCase_ : Dict = 'mr' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowerCAmelCase_ : int = 'zh' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : int = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , { # en_XX, A, test, EOS 'input_ids': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], 
'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 1_2_8_0_0_6, } , )
350
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowercase__ : Optional[int] = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowercase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
289
0
import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class lowerCamelCase (__lowerCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = MvpTokenizer lowerCamelCase__ = MvpTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = filter_roberta_detectors def __A ( self : Optional[Any] ) -> Union[str, Any]: super().setUp() SCREAMING_SNAKE_CASE_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] SCREAMING_SNAKE_CASE_ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__magic_name__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__magic_name__ ) ) def __A ( self : Any , **__magic_name__ : Optional[int] ) -> List[str]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) def __A ( self : Tuple , **__magic_name__ : List[str] ) -> Any: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ 
) def __A ( self : List[Any] , __magic_name__ : int ) -> Optional[Any]: return "lower newer", "lower newer" @cached_property def __A ( self : Union[str, Any] ) -> List[str]: return MvpTokenizer.from_pretrained("RUCAIBox/mvp" ) @cached_property def __A ( self : List[str] ) -> Optional[int]: return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" ) @require_torch def __A ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] SCREAMING_SNAKE_CASE_ = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , max_length=len(__magic_name__ ) , padding=__magic_name__ , return_tensors="pt" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE_ = batch.input_ids.tolist()[0] self.assertListEqual(__magic_name__ , __magic_name__ ) # Test that special tokens are reset @require_torch def __A ( self : Union[str, Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="pt" ) # check if input_ids are returned and no labels self.assertIn("input_ids" , __magic_name__ ) self.assertIn("attention_mask" , __magic_name__ ) self.assertNotIn("labels" , __magic_name__ ) self.assertNotIn("decoder_attention_mask" , __magic_name__ ) @require_torch def __A ( self : Tuple ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE_ = tokenizer(text_target=__magic_name__ , max_length=32 , 
padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def __A ( self : Optional[Any] ) -> int: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE_ = tokenizer( ["I am a small frog" * 1_024, "I am a small frog"] , padding=__magic_name__ , truncation=__magic_name__ , return_tensors="pt" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) self.assertEqual(batch.input_ids.shape , (2, 1_024) ) @require_torch def __A ( self : Optional[int] ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.'''] SCREAMING_SNAKE_CASE_ = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , text_target=__magic_name__ , return_tensors="pt" ) SCREAMING_SNAKE_CASE_ = inputs['''input_ids'''] SCREAMING_SNAKE_CASE_ = inputs['''labels'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def __A ( self : Tuple ) -> str: pass def __A ( self : str ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ ) SCREAMING_SNAKE_CASE_ = '''A, <mask> AllenNLP sentence.''' SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ , return_token_type_ids=__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer_p.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ , 
return_token_type_ids=__magic_name__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( __magic_name__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __magic_name__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
118
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowercase__ ( __lowerCamelCase ): '''simple docstring''' def UpperCamelCase__ ( self, __magic_name__ ) -> Union[str, Any]: """simple docstring""" with open(__magic_name__, encoding='''utf-8''' ) as input_file: UpperCamelCase__ : Tuple = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' ) UpperCamelCase__ : str = input_file.read() UpperCamelCase__ : List[Any] = regexp.search(__magic_name__ ) return match def UpperCamelCase__ ( self, __magic_name__ ) -> Any: """simple docstring""" with open(__magic_name__, encoding='''utf-8''' ) as input_file: UpperCamelCase__ : Dict = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL ) UpperCamelCase__ : Any = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCamelCase__ : Tuple = regexp.finditer(__magic_name__ ) UpperCamelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def UpperCamelCase__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase__ : int = Path('''./datasets''' ) UpperCamelCase__ : Any = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__magic_name__ ) ): raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" ) def UpperCamelCase__ ( self ) -> Dict: """simple docstring""" UpperCamelCase__ : Optional[int] = Path('''./datasets''' ) UpperCamelCase__ : Optional[Any] = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_print_statements(str(__magic_name__ ) ): raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
201
0
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE :Union[str, Any] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class A_ ( lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : str = GPTSwaTokenizer _lowerCamelCase : Dict = False _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = False def lowercase ( self : Any ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = GPTSwaTokenizer(snake_case_ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase ( self : List[Any] , snake_case_ : Optional[int] ): _UpperCAmelCase = "This is a test" _UpperCAmelCase = "This is a test" return input_text, output_text def lowercase ( self : int ): _UpperCAmelCase = "<s>" _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(snake_case_ ) , 2_0_0_0 ) def lowercase ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 ) def lowercase ( self : str ): _UpperCAmelCase = GPTSwaTokenizer(snake_case_ ) _UpperCAmelCase = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] ) _UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and 
this is falsé." ) # fmt: off self.assertListEqual( snake_case_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on _UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual( snake_case_ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ ) # fmt: off self.assertListEqual( snake_case_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def lowercase ( self : Any ): _UpperCAmelCase = GPTSwaTokenizer(snake_case_ ) _UpperCAmelCase = ["This is a test", "I was born in 92000, and this is falsé."] _UpperCAmelCase = [ [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2], [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(snake_case_ , snake_case_ ): self.assertListEqual(tokenizer.encode_fast(snake_case_ ) , snake_case_ ) # Test that decode_fast returns the input text for text, token_ids in zip(snake_case_ , snake_case_ ): self.assertEqual(tokenizer.decode_fast(snake_case_ ) , snake_case_ ) @slow def lowercase ( self : int ): _UpperCAmelCase = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. 
Cool", ] # fmt: off _UpperCAmelCase = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=snake_case_ , )
156
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def UpperCAmelCase_ ( __lowercase : str , __lowercase : str = "cpu" , __lowercase : Union[str, None] = None ) -> None: '''simple docstring''' _UpperCAmelCase = torch.load(__lowercase , map_location=__lowercase ) for k, v in tqdm(state_dict.items() ): if not isinstance(__lowercase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) _UpperCAmelCase = v.half() if save_path is None: # overwrite src_path _UpperCAmelCase = src_path torch.save(__lowercase , __lowercase ) if __name__ == "__main__": fire.Fire(convert)
156
1
def __magic_name__ ( A : list ): '''simple docstring''' def merge(A : list, A : list ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(A ) <= 1: return collection a = len(A ) // 2 return merge(merge_sort(collection[:mid] ), merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] = input('Enter numbers separated by a comma:\n').strip() __lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
107
"""simple docstring""" def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: if height >= 1: move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: print('moving disk from' , _SCREAMING_SNAKE_CASE , 'to' , _SCREAMING_SNAKE_CASE ) def __a ( ) ->List[str]: a__: Dict = int(input('Height of hanoi: ' ).strip() ) move_tower(_SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' ) if __name__ == "__main__": main()
290
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = """▁""" UpperCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase__ = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase__ = { """facebook/xglm-564M""": 2048, } class __lowerCAmelCase ( A ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self : int , A : Optional[int] , A : str="<s>" , A : Optional[Any]="</s>" , A : Optional[int]="</s>" , A : List[Any]="<s>" , A : Tuple="<unk>" , A : List[str]="<pad>" , A : Optional[Dict[str, Any]] = None , **A : List[Any] , ) -> None: """simple docstring""" _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer _UpperCAmelCase = 7 _UpperCAmelCase = [F"<madeupword{i}>" for i in range(self.num_madeup_words)] _UpperCAmelCase = kwargs.get('additional_special_tokens' , []) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , ) _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(A)) _UpperCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCAmelCase = 1 # Mimic fairseq token-to-id alignment for the first 4 token _UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} _UpperCAmelCase = len(self.sp_model) _UpperCAmelCase = {F"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)} self.fairseq_tokens_to_ids.update(A) _UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None _UpperCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] , A : Optional[int]) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def _lowerCamelCase ( self : Any , A : List[int] , A : Optional[List[int]] = None) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a _UpperCAmelCase = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _lowerCamelCase ( self : Dict , A : List[int] , A : Optional[List[int]] = None , A : bool = False) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A) if token_ids_a is None: return [1] + ([0] * len(A)) return [1] + ([0] * len(A)) + [1, 1] + ([0] * len(A)) def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]: """simple docstring""" _UpperCAmelCase = [self.sep_token_id] if token_ids_a is None: 
return len(sep + token_ids_a) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a) * [0] @property def _lowerCamelCase ( self : int) -> Optional[int]: """simple docstring""" return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words def _lowerCamelCase ( self : str) -> Dict: """simple docstring""" _UpperCAmelCase = {self.convert_ids_to_tokens(A): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _lowerCamelCase ( self : str , A : str) -> List[str]: """simple docstring""" return self.sp_model.encode(A , out_type=A) def _lowerCamelCase ( self : Optional[Any] , A : Optional[int]) -> int: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCAmelCase = self.sp_model.PieceToId(A) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self : int , A : int) -> Tuple: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def _lowerCamelCase ( self : Optional[int] , A : List[str]) -> List[Any]: """simple docstring""" _UpperCAmelCase = ''.join(A).replace(A , ' ').strip() return out_string def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _UpperCAmelCase = os.path.join( A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(A) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , A) elif not os.path.isfile(self.vocab_file): with open(A , 'wb') as fi: _UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(A) return (out_vocab_file,)
353
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = CLIPTokenizer UpperCamelCase = CLIPTokenizerFast UpperCamelCase = True UpperCamelCase = {} UpperCamelCase = False def _lowerCamelCase ( self : List[str]) -> List[str]: """simple docstring""" super().setUp() # fmt: off _UpperCAmelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on _UpperCAmelCase = dict(zip(A , range(len(A)))) _UpperCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>'] _UpperCAmelCase = {'unk_token': '<unk>'} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(A) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(A)) def _lowerCamelCase ( self : Optional[Any] , **A : str) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map) return CLIPTokenizer.from_pretrained(self.tmpdirname , **A) def _lowerCamelCase ( self : Any , **A : Dict) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A) def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any]) -> int: """simple docstring""" _UpperCAmelCase = 'lower newer' _UpperCAmelCase = 'lower newer' return input_text, output_text def _lowerCamelCase ( self : Dict) -> Any: """simple docstring""" _UpperCAmelCase = 
CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) _UpperCAmelCase = 'lower newer' _UpperCAmelCase = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>'] _UpperCAmelCase = tokenizer.tokenize(A) self.assertListEqual(A , A) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A) , A) @require_ftfy def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): _UpperCAmelCase = self.tokenizer_class.from_pretrained(A , **A) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A) _UpperCAmelCase = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.' _UpperCAmelCase = tokenizer_s.tokenize(A) _UpperCAmelCase = tokenizer_r.tokenize(A) self.assertListEqual(A , A) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways _UpperCAmelCase = 'xa\u0303y' + ' ' + 'x\xe3y' _UpperCAmelCase = tokenizer_s.tokenize(A) _UpperCAmelCase = tokenizer_r.tokenize(A) self.assertListEqual(A , A) # Test that the tokenization is identical on unicode of space type _UpperCAmelCase = [ '\u0009', # (horizontal tab, '\t') '\u000B', # (vertical tab) '\u000C', # (form feed) '\u0020', # (space, ' ') '\u200E', # (left-to-right mark):w '\u200F', # (right-to-left mark) ] for unicode_seq in spaces_unicodes: _UpperCAmelCase = tokenizer_s.tokenize(A) _UpperCAmelCase = tokenizer_r.tokenize(A) self.assertListEqual(A , A) # Test that the tokenization is identical on unicode of line break type _UpperCAmelCase = [ '\u000A', # (line feed, '\n') '\r\n', # (carriage return and line feed, '\r\n') '\u000D', # (carriage return, '\r') '\r', # (carriage return, '\r') '\u000D', # (carriage return, '\r') '\u2028', # (line separator) '\u2029', # (paragraph 
separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: _UpperCAmelCase = tokenizer_s.tokenize(A) _UpperCAmelCase = tokenizer_r.tokenize(A) self.assertListEqual(A , A) def _lowerCamelCase ( self : str) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): _UpperCAmelCase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` _UpperCAmelCase = F"{text_of_1_token} {text_of_1_token}" _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , ) _UpperCAmelCase = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A) self.assertEqual(encoding.offset_mapping[0] , (0, len(A))) self.assertEqual( encoding.offset_mapping[1] , (len(A) + 1, len(A) + 1 + len(A)) , ) _UpperCAmelCase = F" {text}" _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , ) _UpperCAmelCase = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A))) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A) + 1, 1 + len(A) + 1 + len(A)) , ) def _lowerCamelCase ( self : Tuple) -> str: """simple docstring""" with self.assertRaises(A) as context: self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer') self.assertTrue( context.exception.args[0].startswith( 'The `backend_tokenizer` provided does not match the expected format.')) @require_ftfy def _lowerCamelCase ( self : int) -> int: """simple docstring""" super().test_tokenization_python_rust_equals() def _lowerCamelCase ( self : Union[str, Any]) -> Any: """simple docstring""" pass
290
0
"""simple docstring""" class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' pass class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' pass class _UpperCamelCase : '''simple docstring''' def __init__( self ): __lowerCAmelCase = [ [], [], [], ] def snake_case ( self , __a , __a ): try: if len(self.queues[priority] ) >= 1_00: raise OverflowError("Maximum queue size is 100" ) self.queues[priority].append(__a ) except IndexError: raise ValueError("Valid priorities are 0, 1, and 2" ) def snake_case ( self ): for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError("All queues are empty" ) def __str__( self ): return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) ) class _UpperCamelCase : '''simple docstring''' def __init__( self ): __lowerCAmelCase = [] def snake_case ( self , __a ): if len(self.queue ) == 1_00: raise OverFlowError("Maximum queue size is 100" ) self.queue.append(__a ) def snake_case ( self ): if not self.queue: raise UnderFlowError("The queue is empty" ) else: __lowerCAmelCase = min(self.queue ) self.queue.remove(__a ) return data def __str__( self ): return str(self.queue ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(_UpperCamelCase ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(_UpperCamelCase ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(_UpperCamelCase ) print(epq.dequeue() 
) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(_UpperCamelCase ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
57
import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor _A = logging.get_logger(__name__) class A ( __UpperCAmelCase ): def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''', UpperCamelCase__, ) super().__init__(*UpperCamelCase__, **UpperCamelCase__ )
278
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCAmelCase__ ( metaclass=__magic_name__ ): SCREAMING_SNAKE_CASE_ =['''torch''', '''scipy'''] def __init__( self : Any , *snake_case__ : str , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch", "scipy"] ) @classmethod def __a ( cls : Union[str, Any] , *snake_case__ : Optional[int] , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["torch", "scipy"] ) @classmethod def __a ( cls : Union[str, Any] , *snake_case__ : Optional[int] , **snake_case__ : Dict ): '''simple docstring''' requires_backends(cls , ["torch", "scipy"] )
298
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if 
isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." ) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , 
adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. 
This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. 
Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' 
) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
298
1
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)

# NOTE(review): the original chunk assigned every object below to one recycled
# identifier while the call sites further down (``_LazyAutoMapping(...)``,
# ``auto_class_update(FlaxAutoModel)``) referenced the real names, which were
# therefore undefined at import time. Each definition has been renamed to the
# name its own call site expects.

# Base model mapping: model type -> Flax base model class name.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

# Model for pre-training mapping.
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

# Model for Masked LM mapping.
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

# Model for Seq2Seq Causal LM mapping.
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

# Model for Image Classification mapping.
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

# Model for Vision-to-Text mapping.
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

# Model for Causal LM mapping.
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

# Model for Sequence Classification mapping.
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

# Model for Question Answering mapping.
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

# Model for Token Classification mapping.
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

# Model for Multiple Choice mapping.
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

# Model for Next Sentence Prediction mapping.
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

# Model for Speech Seq2Seq mapping.
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

# Model for Audio Classification mapping.
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy config-class -> model-class mappings consumed by the auto classes below.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


# Each auto class exposes ``from_config``/``from_pretrained`` dispatching on the
# mapping stored in ``_model_mapping`` (the attribute name read by
# ``_BaseAutoModelClass`` — per the transformers auto-factory convention; the
# mangled source hid the original attribute name).
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
5
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

# Bits used to encode each 8-bit channel value. The mangled source named this
# ``snake_case_`` while both converters below referenced ``BITS``.
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor with values in [0, 1] to a bit tensor in {-1, 1}.

    ``x`` is expected to be (b, c, h, w); the result is (b, c*bits, h, w).
    (Mangled source defined this as ``A`` while the pipeline called
    ``decimal_to_bits`` — restored.)
    """
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    # One power-of-two mask per bit position, most significant first.
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits_repr = ((x & mask) != 0).float()
    bits_repr = rearrange(bits_repr, "b c d h w -> b (c d) h w")
    # Map {0, 1} -> {-1, 1} so the diffusion model sees zero-centred targets.
    bits_repr = bits_repr * 2 - 1
    return bits_repr


def bits_to_decimal(x, bits=BITS):
    """Inverse of :func:`decimal_to_bits`: bit tensor back to values in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    # ``torch.intaa`` in the mangled source; ``int32`` is the dtype used upstream.
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clips "predicted x_0" to ±``self.bit_scale`` instead of ±1.

    Bound to a scheduler instance by :class:`BitDiffusion`, so ``self`` is the
    scheduler (it must also carry ``bit_scale``; see ``BitDiffusion.__init__``).
    """
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # Notation follows DDIM, https://arxiv.org/pdf/2010.02502.pdf, eqs. (12), (16).
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. predicted original sample ("predicted x_0", formula (12))
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0" to the bit range
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. variance sigma_t(eta), formula (16)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in GLIDE
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. "direction pointing to x_t", formula (12)
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. x_{t-1} without random noise, formula (12)
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type: str = "epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clips "predicted x_0" to ±``self.bit_scale`` instead of ±1."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. predicted x_0, formula (15) of https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0" to the bit range
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. coefficients for x_0 and x_t, formula (7)
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. predicted previous sample mu_t, formula (7)
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    """Bit-diffusion pipeline: diffuses images in an 8-bit binary encoding.

    NOTE(review): the mangled source named this class ``__snake_case`` with an
    undefined base ``a``; ``DiffusionPipeline`` is the only imported base class.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.register_modules(unet=unet, scheduler=scheduler)
        # NOTE(review): the mangled source obscured the targets of the two
        # assignments here. The bit-aware step functions read scheduler state
        # AND ``bit_scale`` through their first argument, so we bind the chosen
        # step to the scheduler instance and give the scheduler the scale.
        self.scheduler.bit_scale = bit_scale
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        self.scheduler.step = step_fn.__get__(self.scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample ``batch_size`` images of ``height`` x ``width`` pixels."""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        # Start from noise expressed in the {-1, 1} bit space, scaled.
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
51
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Registry consumed by ``_LazyModule`` below. NOTE(review): the mangled source
# rebound every addition to a throwaway name, leaving ``_import_structure``
# undefined at the ``_LazyModule(...)`` call — restored to the standard
# transformers lazy-import pattern.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports for type checkers; at runtime the module is lazy.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first
    # attribute access. NOTE(review): the mangled source assigned the proxy to
    # a dead local instead of ``sys.modules[__name__]`` — restored.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
352
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self :Optional[int] , __magic_name__ :str , __magic_name__ :int=2 , __magic_name__ :List[str]=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :str=2 , __magic_name__ :Any=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Dict=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=36 , __magic_name__ :Optional[Any]=3 , __magic_name__ :str=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Dict="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Dict=512 , __magic_name__ :str=16 , __magic_name__ :List[Any]=2 , __magic_name__ :Tuple=0.02 , __magic_name__ :Any=6 , __magic_name__ :Optional[int]=6 , __magic_name__ :Tuple=3 , __magic_name__ :str=4 , __magic_name__ :List[str]=None , __magic_name__ :str=1000 , ): '''simple docstring''' a = parent a = batch_size a = num_channels a = image_size a = patch_size a = 
text_seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = coordinate_size a = shape_size a = num_labels a = num_choices a = scope a = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a = text_seq_length a = (image_size // patch_size) ** 2 + 1 a = self.text_seq_length + self.image_seq_length def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a = bbox[i, j, 3] a = bbox[i, j, 1] a = t if bbox[i, j, 2] < bbox[i, j, 0]: a = bbox[i, j, 2] a = bbox[i, j, 0] a = t a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.text_seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) a = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :int ): '''simple docstring''' a = LayoutLMvaModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # text + image a = model(__magic_name__ , pixel_values=__magic_name__ ) a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , token_type_ids=__magic_name__ ) a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only a = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only a = model(pixel_values=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :List[str] ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( 
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :Tuple ): '''simple docstring''' a = self.num_labels a = LayoutLMvaForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :str , __magic_name__ :int , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ): '''simple docstring''' a = LayoutLMvaForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, 
"""attention_mask""": input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] ): '''simple docstring''' return True def lowerCamelCase__ ( self :int ): '''simple docstring''' a = LayoutLMvaModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :Any=False ): '''simple docstring''' a = copy.deepcopy(__magic_name__ ) if model_class in get_values(__magic_name__ ): a = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__magic_name__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): a = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in get_values(__magic_name__ ): a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) elif model_class in [ *get_values(__magic_name__ ), ]: a = torch.zeros( 
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__magic_name__ , ) return inputs_dict def lowerCamelCase__ ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def lowerCamelCase__ ( self :int ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = LayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( ) -> str: a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__magic_name__ ) a = self.default_image_processor a = 
prepare_img() a = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ ) a = torch.tensor([[1, 2]] ) a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass a = model( input_ids=input_ids.to(__magic_name__ ) , bbox=bbox.to(__magic_name__ ) , pixel_values=pixel_values.to(__magic_name__ ) , ) # verify the logits a = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) a = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
347
0
'''simple docstring''' def _A ( A__ , A__ , A__ ): """simple docstring""" def update_area_of_max_square(A__ , A__ ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 __lowercase = update_area_of_max_square(A__ , col + 1 ) __lowercase = update_area_of_max_square(row + 1 , col + 1 ) __lowercase = update_area_of_max_square(row + 1 , A__ ) if mat[row][col]: __lowercase = 1 + min([right, diagonal, down] ) __lowercase = max(largest_square_area[0] , A__ ) return sub_problem_sol else: return 0 __lowercase = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def _A ( A__ , A__ , A__ ): """simple docstring""" def update_area_of_max_square_using_dp_array( A__ , A__ , A__ ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] __lowercase = update_area_of_max_square_using_dp_array(A__ , col + 1 , A__ ) __lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , A__ ) __lowercase = update_area_of_max_square_using_dp_array(row + 1 , A__ , A__ ) if mat[row][col]: __lowercase = 1 + min([right, diagonal, down] ) __lowercase = max(largest_square_area[0] , A__ ) __lowercase = sub_problem_sol return sub_problem_sol else: return 0 __lowercase = [0] __lowercase = [[-1] * cols for _ in range(A__ )] update_area_of_max_square_using_dp_array(0 , 0 , A__ ) return largest_square_area[0] def _A ( A__ , A__ , A__ ): """simple docstring""" __lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )] __lowercase = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __lowercase = dp_array[row][col + 1] __lowercase = dp_array[row + 1][col + 1] __lowercase = dp_array[row + 1][col] if mat[row][col] == 1: __lowercase = 1 + min(A__ , A__ , A__ ) __lowercase = max(dp_array[row][col] , A__ ) else: __lowercase = 0 return largest_square_area def _A ( A__ , A__ , A__ ): """simple docstring""" __lowercase = [0] * (cols + 1) __lowercase = [0] * (cols + 1) __lowercase = 0 for row in range(rows - 
1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __lowercase = current_row[col + 1] __lowercase = next_row[col + 1] __lowercase = next_row[col] if mat[row][col] == 1: __lowercase = 1 + min(A__ , A__ , A__ ) __lowercase = max(current_row[col] , A__ ) else: __lowercase = 0 __lowercase = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
104
'''simple docstring''' # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file lowerCAmelCase__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def _A ( A__=None ): """simple docstring""" if subparsers is not None: __lowercase = subparsers.add_parser('''tpu-config''' , description=_description ) else: __lowercase = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description ) # Core arguments __lowercase = parser.add_argument_group( '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' ) config_args.add_argument( '''--config_file''' , type=A__ , default=A__ , help='''Path to the config file to use for accelerate.''' , ) config_args.add_argument( '''--tpu_name''' , default=A__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , ) config_args.add_argument( '''--tpu_zone''' , default=A__ , help='''The zone of the TPU to use. 
If not specified, will use the zone specified in the config file.''' , ) __lowercase = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' ) pod_args.add_argument( '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , ) pod_args.add_argument( '''--command_file''' , default=A__ , help='''The path to the file containing the commands to run on the pod on startup.''' , ) pod_args.add_argument( '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , ) pod_args.add_argument( '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , ) pod_args.add_argument( '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , ) pod_args.add_argument( '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def _A ( A__ ): """simple docstring""" __lowercase = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(A__ ): __lowercase = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: __lowercase = defaults.command_file if not args.command and defaults.commands is not None: __lowercase = defaults.commands if not args.tpu_name: __lowercase = defaults.tpu_name if not args.tpu_zone: __lowercase = defaults.tpu_zone if args.accelerate_version == "dev": __lowercase = '''git+https://github.com/huggingface/accelerate.git''' elif args.accelerate_version == "latest": __lowercase = '''accelerate -U''' elif isinstance(parse(args.accelerate_version ) , A__ ): __lowercase = F"accelerate=={args.accelerate_version}" if not args.command_file and not args.command: raise ValueError('''You must specify either a command file or a command to run on the pod.''' ) if args.command_file: with open(args.command_file , '''r''' ) as f: __lowercase = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , A__ ): __lowercase = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate __lowercase = ['''cd /usr/share'''] if args.install_accelerate: new_cmd += [F"pip install {args.accelerate_version}"] new_cmd += args.command __lowercase = '''; '''.join(A__ ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess __lowercase = ['''gcloud'''] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F"Running {' '.join(A__ )}" ) return subprocess.run(A__ ) print('''Successfully setup pod.''' ) def _A ( ): """simple docstring""" __lowercase = tpu_command_parser() __lowercase = parser.parse_args() tpu_command_launcher(A__ )
104
1
'''simple docstring''' from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def __lowercase ( __lowercase ) -> str: '''simple docstring''' return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def __lowercase ( ) -> Tuple: '''simple docstring''' _A = ArgumentParser( "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=__lowercase ) _A = parser.add_subparsers(help="datasets-cli command helpers" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(__lowercase ) EnvironmentCommand.register_subcommand(__lowercase ) TestCommand.register_subcommand(__lowercase ) RunBeamCommand.register_subcommand(__lowercase ) DummyDataCommand.register_subcommand(__lowercase ) # Parse args _A , _A = parser.parse_known_args() if not hasattr(__lowercase , "func" ): parser.print_help() exit(1 ) _A = parse_unknown_args(__lowercase ) # Run _A = args.func(__lowercase , **__lowercase ) service.run() if __name__ == "__main__": main()
174
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
174
1
'''simple docstring''' import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 SCREAMING_SNAKE_CASE_: List[str] =data_utils.TransfoXLTokenizer SCREAMING_SNAKE_CASE_: str =data_utils.TransfoXLCorpus SCREAMING_SNAKE_CASE_: List[str] =data_utils SCREAMING_SNAKE_CASE_: Optional[Any] =data_utils def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : int ) -> Optional[Any]: '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(snake_case_ , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) UpperCAmelCase_ = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" ) UpperCAmelCase_ = corpus.vocab.__dict__ torch.save(snake_case_ , snake_case_ ) UpperCAmelCase_ = corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , snake_case_ ) UpperCAmelCase_ = pytorch_dump_folder_path + "/" + CORPUS_NAME print(f"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(snake_case_ , snake_case_ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model UpperCAmelCase_ = os.path.abspath(snake_case_ ) UpperCAmelCase_ = os.path.abspath(snake_case_ ) print(f"""Converting Transformer XL checkpoint 
from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": UpperCAmelCase_ = TransfoXLConfig() else: UpperCAmelCase_ = TransfoXLConfig.from_json_file(snake_case_ ) print(f"""Building PyTorch model from configuration: {config}""" ) UpperCAmelCase_ = TransfoXLLMHeadModel(snake_case_ ) UpperCAmelCase_ = load_tf_weights_in_transfo_xl(snake_case_ , snake_case_ , snake_case_ ) # Save pytorch-model UpperCAmelCase_ = os.path.join(snake_case_ , snake_case_ ) UpperCAmelCase_ = os.path.join(snake_case_ , snake_case_ ) print(f"""Save PyTorch model to {os.path.abspath(snake_case_ )}""" ) torch.save(model.state_dict() , snake_case_ ) print(f"""Save configuration file to {os.path.abspath(snake_case_ )}""" ) with open(snake_case_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.', ) parser.add_argument( '--tf_checkpoint_path', default='', type=str, help='An optional path to a TensorFlow checkpoint path to be converted.', ) parser.add_argument( '--transfo_xl_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--transfo_xl_dataset_file', default='', type=str, help='An optional dataset file to be converted in a vocabulary.', ) SCREAMING_SNAKE_CASE_: Dict =parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCAmelCase ( metaclass=UpperCamelCase__ ): __lowercase = ["""note_seq"""] def __init__( self :Optional[Any] , *lowercase_ :List[Any] , **lowercase_ :List[str] )-> int: requires_backends(self , ["note_seq"] ) @classmethod def UpperCAmelCase_ ( cls :str , *lowercase_ :Union[str, Any] , **lowercase_ :Any )-> Optional[int]: requires_backends(cls , ["note_seq"] ) @classmethod def UpperCAmelCase_ ( cls :Dict , *lowercase_ :Tuple , **lowercase_ :List[Any] )-> Optional[Any]: requires_backends(cls , ["note_seq"] )
237
0
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def UpperCamelCase ( __lowercase : Tuple ): '''simple docstring''' if isinstance(__lowercase ,collections.abc.Iterable ): return x return (x, x) @require_flax class UpperCAmelCase : '''simple docstring''' def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase , lowercase , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase ) A_ : Dict = FlaxVisionTextDualEncoderModel(lowercase ) A_ : 
Optional[int] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : Dict = self.get_vision_text_model(lowercase , lowercase ) A_ : Tuple = {'vision_model': vision_model, 'text_model': text_model} A_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase ) A_ : Union[str, Any] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : Union[str, Any] = self.get_vision_text_model(lowercase , lowercase ) A_ : str = {'vision_model': vision_model, 'text_model': text_model} A_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase ) A_ : str = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase ) A_ : List[str] = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase ) A_ : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase ) A_ : Optional[int] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase ) A_ : Tuple = after_output[0] A_ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase , 1E-3 ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : str = 
self.get_vision_text_model(lowercase , lowercase ) A_ : Dict = {'vision_model': vision_model, 'text_model': text_model} A_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase ) A_ : Optional[int] = model( input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase , output_attentions=lowercase ) A_ : int = output.vision_model_output.attentions self.assertEqual(len(lowercase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : int = to_atuple(vision_model.config.image_size ) A_ : int = to_atuple(vision_model.config.patch_size ) A_ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) A_ : Union[str, Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) A_ : Optional[int] = output.text_model_output.attentions self.assertEqual(len(lowercase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" pt_model.to(lowercase ) pt_model.eval() # prepare inputs A_ : int = inputs_dict A_ : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): A_ : int = pt_model(**lowercase ).to_tuple() A_ : Optional[int] = fx_model(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase ) A_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase , from_pt=lowercase ) A_ : Union[str, Any] = fx_model_loaded(**lowercase 
).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase ) A_ : List[Any] = VisionTextDualEncoderModel.from_pretrained(lowercase , from_flax=lowercase ) pt_model_loaded.to(lowercase ) pt_model_loaded.eval() with torch.no_grad(): A_ : Tuple = pt_model_loaded(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase , pt_output_loaded.numpy() , 4E-2 ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase ) A_ : Any = VisionTextDualEncoderModel(lowercase ) A_ : Tuple = FlaxVisionTextDualEncoderModel(lowercase ) A_ : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase ) A_ : Union[str, Any] = fx_state self.check_pt_flax_equivalence(lowercase , lowercase , lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase ) A_ : Dict = VisionTextDualEncoderModel(lowercase ) A_ : Optional[Any] = FlaxVisionTextDualEncoderModel(lowercase ) A_ : Any = load_flax_weights_in_pytorch_model(lowercase , fx_model.params ) self.check_pt_flax_equivalence(lowercase , lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = 
self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase ) @is_pt_flax_cross_test def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = self.prepare_config_and_inputs() A_ : Union[str, Any] = config_inputs_dict.pop('vision_config' ) A_ : List[Any] = config_inputs_dict.pop('text_config' ) A_ : int = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase , lowercase , lowercase ) self.check_equivalence_flax_to_pt(lowercase , lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.get_pretrained_model_and_inputs() A_ : Union[str, Any] = model_a(**lowercase ) A_ : List[str] = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase ) A_ : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase ) A_ : Any = model_a(**lowercase ) A_ : int = after_outputs[0] A_ : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase , 1E-5 ) @require_flax class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowercase , text_from_pt=lowercase , ) A_ : Dict = 1_3 A_ : List[str] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) A_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) A_ : Optional[int] = random_attention_mask([batch_size, 4] ) A_ : Optional[int] = 
{'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : Union[str, Any] = FlaxViTModel(lowercase ) A_ : str = FlaxBertModel(lowercase ) return vision_model, text_model def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = FlaxViTModelTester(self ) A_ : Optional[int] = FlaxBertModelTester(self ) A_ : Optional[Any] = vit_model_tester.prepare_config_and_inputs() A_ : Dict = bert_model_tester.prepare_config_and_inputs() A_ : Dict = vision_config_and_inputs A_ : Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowercase , text_from_pt=lowercase , ) A_ : List[str] = 1_3 A_ : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) A_ : List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) A_ : Optional[Any] = random_attention_mask([batch_size, 4] ) A_ : Union[str, Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : Optional[int] = FlaxCLIPVisionModel(lowercase ) A_ : int = FlaxBertModel(lowercase ) return vision_model, text_model def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = 
FlaxCLIPVisionModelTester(self ) A_ : List[str] = FlaxBertModelTester(self ) A_ : int = clip_model_tester.prepare_config_and_inputs() A_ : Any = bert_model_tester.prepare_config_and_inputs() A_ : Union[str, Any] = vision_config_and_inputs A_ : List[str] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 ) A_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) A_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) A_ : int = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=lowercase , padding=lowercase , return_tensors='np' ) A_ : Optional[int] = model(**lowercase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) A_ : List[Any] = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase , atol=1E-3 ) )
353
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# ONNX runtime element-type string -> numpy dtype.
# Fix: the previous mapping referenced non-existent numpy attributes
# (np.inta, np.intaa, np.uinta, ...), which raises AttributeError at import.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    """Minimal wrapper around an `onnxruntime.InferenceSession` exposing
    `save_pretrained`/`from_pretrained` helpers that mirror the usual
    `PreTrainedModel` API (local directory or Hugging Face Hub)."""

    def __init__(self, model=None, **kwargs):
        """
        Args:
            model: a live `onnxruntime.InferenceSession`.
            model_save_dir (kwarg): directory the session was loaded from.
            latest_model_name (kwarg): file name of the loaded ONNX graph;
                defaults to the canonical ONNX weights name.
        """
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run the session on the given named inputs and return all outputs.

        Fix: convert each input *value* to an array — the previous
        comprehension ignored `v` and arrayed the same object for every key.
        """
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # output_names=None asks onnxruntime for every model output.
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an `ort.InferenceSession` for `path`, defaulting to CPU execution."""
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the latest ONNX graph (and external weights, if present) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Save the model files to a directory; refuses a file path."""
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        """Load a session from a local directory, or download the graph from the Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        """Public entry point; supports `repo@revision` syntax in `model_id`."""
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )


# Backwards-compatible alias for the previous (machine-generated) class name.
UpperCAmelCase = OnnxRuntimeModel
192
0
def a__ ( snake_case , snake_case ): """simple docstring""" def get_matched_characters(snake_case , snake_case ) -> str: __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = min(len(_stra ) , len(_stra ) ) // 2 for i, l in enumerate(_stra ): __SCREAMING_SNAKE_CASE : Optional[int] = int(max(0 , i - limit ) ) __SCREAMING_SNAKE_CASE : Any = int(min(i + limit + 1 , len(_stra ) ) ) if l in _stra[left:right]: matched.append(__snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = F'''{_stra[0:_stra.index(__snake_case )]} {_stra[_stra.index(__snake_case ) + 1:]}''' return "".join(__snake_case ) # matching characters __SCREAMING_SNAKE_CASE : Dict = get_matched_characters(__snake_case , __snake_case ) __SCREAMING_SNAKE_CASE : Any = get_matched_characters(__snake_case , __snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = len(__snake_case ) # transposition __SCREAMING_SNAKE_CASE : Tuple = ( len([(ca, ca) for ca, ca in zip(__snake_case , __snake_case ) if ca != ca] ) // 2 ) if not match_count: __SCREAMING_SNAKE_CASE : List[Any] = 0.0 else: __SCREAMING_SNAKE_CASE : List[Any] = ( 1 / 3 * ( match_count / len(__snake_case ) + match_count / len(__snake_case ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters __SCREAMING_SNAKE_CASE : Optional[int] = 0 for ca, ca in zip(stra[:4] , stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler("""hello""", """world"""))
303
"""simple docstring""" from __future__ import annotations def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> list: __lowerCAmelCase : Dict = [] __lowerCAmelCase , __lowerCAmelCase : Any = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) __lowerCAmelCase : int = result + left + right return input_list def _lowercase ( __snake_case ) -> list: if len(__snake_case ) <= 1: return input_list __lowerCAmelCase : int = list(__snake_case ) # iteration for two-way merging __lowerCAmelCase : Optional[int] = 2 while p <= len(__snake_case ): # getting low, high and middle value for merge-sort of single list for i in range(0 ,len(__snake_case ) ,__snake_case ): __lowerCAmelCase : Union[str, Any] = i __lowerCAmelCase : Tuple = i + p - 1 __lowerCAmelCase : Optional[Any] = (low + high + 1) // 2 __lowerCAmelCase : Any = merge(__snake_case ,__snake_case ,__snake_case ,__snake_case ) # final merge of last two parts if p * 2 >= len(__snake_case ): __lowerCAmelCase : Optional[Any] = i __lowerCAmelCase : Union[str, Any] = merge(__snake_case ,0 ,__snake_case ,len(__snake_case ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip() if user_input == "": __snake_case : Optional[int] = [] else: __snake_case : int = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
269
0
"""simple docstring""" class _lowerCAmelCase : """simple docstring""" def __init__( self : int, UpperCAmelCase__ : Any ): __lowercase = size __lowercase = [0] * size __lowercase = [0] * size @staticmethod def _lowercase ( UpperCAmelCase__ : Union[str, Any] ): return index | (index + 1) @staticmethod def _lowercase ( UpperCAmelCase__ : Optional[int] ): return (index & (index + 1)) - 1 def _lowercase ( self : Optional[int], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[str] ): __lowercase = value while index < self.size: __lowercase = self.get_prev(lowerCAmelCase__ ) + 1 if current_left_border == index: __lowercase = value else: __lowercase = max(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ ) __lowercase = self.get_next(lowerCAmelCase__ ) def _lowercase ( self : int, UpperCAmelCase__ : List[str], UpperCAmelCase__ : str ): right -= 1 # Because of right is exclusive __lowercase = 0 while left <= right: __lowercase = self.get_prev(lowerCAmelCase__ ) if left <= current_left: __lowercase = max(lowerCAmelCase__, self.tree[right] ) __lowercase = current_left else: __lowercase = max(lowerCAmelCase__, self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
366
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _a = logging.get_logger(__name__) class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["pixel_values"] def __init__( self : int, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Dict[str, int]] = None, UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, **UpperCAmelCase__ : str, ): super().__init__(**UpperCAmelCase__ ) __lowercase = size if size is not None else {"shortest_edge": 2_5_6} __lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ ) __lowercase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} __lowercase = get_size_dict(UpperCAmelCase__ ) __lowercase = do_resize __lowercase = size __lowercase = resample __lowercase = do_center_crop __lowercase = crop_size __lowercase = do_rescale __lowercase = rescale_factor __lowercase = do_normalize __lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : int, UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Dict[str, int], UpperCAmelCase__ : 
PILImageResampling = PILImageResampling.BICUBIC, UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Dict, ): __lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowercase = get_resize_output_image_size(UpperCAmelCase__, size=size["shortest_edge"], default_to_square=UpperCAmelCase__ ) return resize(UpperCAmelCase__, size=UpperCAmelCase__, resample=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Dict[str, int], UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Dict, ): __lowercase = get_size_dict(UpperCAmelCase__ ) return center_crop(UpperCAmelCase__, size=(size["height"], size["width"]), data_format=UpperCAmelCase__, **UpperCAmelCase__ ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : float, UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Union[str, Any] ): return rescale(UpperCAmelCase__, scale=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ ) def _lowercase ( self : List[Any], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Union[float, List[float]], UpperCAmelCase__ : Union[float, List[float]], UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : int, ): return normalize(UpperCAmelCase__, mean=UpperCAmelCase__, std=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ ) def _lowercase ( self : Any, UpperCAmelCase__ : ImageInput, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : PILImageResampling = None, UpperCAmelCase__ : bool = None, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : Optional[bool] = None, 
UpperCAmelCase__ : Optional[float] = None, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[str, TensorType]] = None, UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST, **UpperCAmelCase__ : Optional[int], ): __lowercase = do_resize if do_resize is not None else self.do_resize __lowercase = size if size is not None else self.size __lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ ) __lowercase = resample if resample is not None else self.resample __lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase = crop_size if crop_size is not None else self.crop_size __lowercase = get_size_dict(UpperCAmelCase__ ) __lowercase = do_rescale if do_rescale is not None else self.do_rescale __lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = image_mean if image_mean is not None else self.image_mean __lowercase = image_std if image_std is not None else self.image_std __lowercase = make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
__lowercase = [to_numpy_array(UpperCAmelCase__ ) for image in images] if do_resize: __lowercase = [self.resize(image=UpperCAmelCase__, size=UpperCAmelCase__, resample=UpperCAmelCase__ ) for image in images] if do_center_crop: __lowercase = [self.center_crop(image=UpperCAmelCase__, size=UpperCAmelCase__ ) for image in images] if do_rescale: __lowercase = [self.rescale(image=UpperCAmelCase__, scale=UpperCAmelCase__ ) for image in images] if do_normalize: __lowercase = [self.normalize(image=UpperCAmelCase__, mean=UpperCAmelCase__, std=UpperCAmelCase__ ) for image in images] __lowercase = [to_channel_dimension_format(UpperCAmelCase__, UpperCAmelCase__ ) for image in images] __lowercase = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase__, tensor_type=UpperCAmelCase__ )
144
0
class a : def __init__( self :Any ,__lowercase :Optional[Any] ): # we need a list not a string, so do something to change the type snake_case__ : List[Any] = arr.split(''',''' ) def __lowerCamelCase ( self :Any ): snake_case__ : List[Any] = [int(self.array[0] )] * len(self.array ) snake_case__ : List[str] = [int(self.array[0] )] * len(self.array ) for i in range(1 ,len(self.array ) ): snake_case__ : List[Any] = max( int(self.array[i] ) + sum_value[i - 1] ,int(self.array[i] ) ) snake_case__ : List[str] = max(sum_value[i] ,rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": A__ = input('''please input some numbers:''') A__ = SubArray(whole_array) A__ = array.solve_sub_array() print(('''the results is:''', re))
230
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class a : def __init__( self : Union[str, Any] ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = {} def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ): if nodea not in self.connections: self.add_node(__lowerCAmelCase ) if nodea not in self.connections: self.add_node(__lowerCAmelCase ) _UpperCAmelCase = probability def lowerCAmelCase_ ( self : Optional[Any] ): return list(self.connections ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = 0 _UpperCAmelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = Counter(graph.get_nodes() ) _UpperCAmelCase = start for _ in range(lowercase ): _UpperCAmelCase = graph.transition(lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
289
0
# Backwards-compatibility shim: this module was superseded by
# `diffusers.pipelines.pipeline_utils`; importing from here re-exports the
# public names and emits a deprecation warning at import time.
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
360
"""Convert an original mLUKE checkpoint into a `transformers` `LukeForMaskedLM` checkpoint."""
import argparse
import json
import os
from collections import OrderedDict

import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


# NOTE(review): this file looks machine-renamed — every local is assigned to
# the same name (`snake_case`) while later lines read the intended names
# (`metadata`, `entity_vocab`, `tokenizer`, `state_dict`, ...), the converter
# takes five parameters that all share one name, and the `__main__` block
# builds the parser under one name but calls `parser.add_argument`. As written
# it cannot run; names need restoring against the upstream conversion script.
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
    """Load the original checkpoint + metadata, patch vocab/special tokens,
    remap weights into `LukeForMaskedLM`, sanity-check outputs, and save."""
    # Read the training metadata that accompanies the checkpoint.
    with open(SCREAMING_SNAKE_CASE__ ) as metadata_file:
        snake_case : int = json.load(SCREAMING_SNAKE_CASE__ )
    snake_case : Any = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    snake_case : Any = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )['''module''']
    # Load the entity vocab file
    snake_case : Dict = load_original_entity_vocab(SCREAMING_SNAKE_CASE__ )
    # add an entry for [MASK2]
    snake_case : List[str] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    snake_case : int = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    snake_case : Union[str, Any] = AddedToken('''<ent>''' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
    snake_case : Optional[int] = AddedToken('''<ent2>''' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
    # Rewrite the saved tokenizer config to advertise the MLuke tokenizer class.
    with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''tokenizer_config.json''' ) , '''r''' ) as f:
        snake_case : Tuple = json.load(SCREAMING_SNAKE_CASE__ )
    snake_case : List[str] = '''MLukeTokenizer'''
    with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''tokenizer_config.json''' ) , '''w''' ) as f:
        json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    with open(os.path.join(SCREAMING_SNAKE_CASE__ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    snake_case : List[Any] = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
    # Initialize the embeddings of the special tokens
    snake_case : List[str] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    snake_case : List[str] = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    snake_case : List[str] = state_dict['''embeddings.word_embeddings.weight''']
    snake_case : int = word_emb[ent_init_index].unsqueeze(0 )
    snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
    snake_case : Dict = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        snake_case : Dict = state_dict[bias_name]
        snake_case : Any = decoder_bias[ent_init_index].unsqueeze(0 )
        snake_case : str = decoder_bias[enta_init_index].unsqueeze(0 )
        snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            snake_case : Optional[Any] = F'encoder.layer.{layer_index}.attention.self.'
            snake_case : int = state_dict[prefix + matrix_name]
            snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
            snake_case : int = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    snake_case : List[Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
    snake_case : Dict = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
    snake_case : List[Any] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    snake_case : Optional[Any] = state_dict['''entity_predictions.bias''']
    snake_case : Optional[int] = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
    snake_case : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    snake_case : str = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE__ ).eval()
    # These decoder weights are re-tied below, so drop them before loading.
    state_dict.pop('''entity_predictions.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.bias''' )
    snake_case : Optional[Any] = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            snake_case : int = state_dict[key]
        else:
            snake_case : List[str] = state_dict[key]
    snake_case ,snake_case : int = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
    # Only the position ids may be unexpected, and only the tied decoders missing.
    if set(SCREAMING_SNAKE_CASE__ ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
    if set(SCREAMING_SNAKE_CASE__ ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    snake_case : Optional[int] = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task='''entity_classification''' )
    snake_case : Tuple = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
    snake_case : int = (0, 9)
    snake_case : str = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , return_tensors='''pt''' )
    snake_case : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        snake_case : Dict = torch.Size((1, 33, 768) )
        snake_case : int = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        snake_case : str = torch.Size((1, 1, 768) )
        snake_case : Tuple = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            F' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    snake_case : str = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
    snake_case : List[Any] = '''Tokyo is the capital of <mask>.'''
    snake_case : Union[str, Any] = (24, 30)
    snake_case : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , return_tensors='''pt''' )
    snake_case : int = model(**SCREAMING_SNAKE_CASE__ )
    snake_case : List[str] = encoding['''input_ids'''][0].tolist()
    snake_case : Union[str, Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    snake_case : Dict = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE__ )
    snake_case : List[Any] = outputs.entity_logits[0][0].argmax().item()
    snake_case : Dict = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(SCREAMING_SNAKE_CASE__ ) )
    model.save_pretrained(SCREAMING_SNAKE_CASE__ )


# NOTE(review): same renaming damage here — presumably the original
# `load_original_entity_vocab` referenced above; the def name collides with
# the converter's.
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> List[str]:
    """Read the original JSON-lines entity vocab and remap names to
    `language:entity` keys (special tokens keep their bare name)."""
    snake_case : Dict = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    snake_case : List[Any] = [json.loads(SCREAMING_SNAKE_CASE__ ) for line in open(SCREAMING_SNAKE_CASE__ )]
    snake_case : Optional[int] = {}
    for entry in data:
        snake_case : Optional[Any] = entry['''id''']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                snake_case : List[str] = entity_id
                break
            snake_case : Any = F'{language}:{entity_name}'
            snake_case : List[str] = entity_id
    return new_mapping


if __name__ == "__main__":
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    lowercase__ = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
83
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> names it exports; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling classes require torch, so only advertise them when it is installed.
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module stays lazy.
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
156
"""Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict."""

import argparse

import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Build a RemBertModel from ``rembert_config_file``, load the TF weights
    from ``tf_checkpoint_path`` into it, and save the resulting state dict to
    ``pytorch_dump_path``.
    """
    # Initialise PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model (state dict only, not the full module)
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
156
1
"""Convert DeiT distilled checkpoints from the timm library."""


import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return a list of (timm key, HF key) pairs used to rename a timm DeiT
    state dict to the HuggingFace naming scheme.

    When ``base_model`` is True the classification/distillation heads are
    dropped and the leading "deit" prefix is stripped from the renamed keys.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate query/key/value
    entries in ``state_dict`` (mutated in place)."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO test image (two cats) used for sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm DeiT model's weights to our DeiT structure,
    verify the logits against the timm model, and save model + image
    processor to ``pytorch_dump_folder_path``.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # patch_size and image_size are encoded in the timm model name, e.g. ..._patch16_224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        # defaults of DeiTConfig already match the base architecture
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    parser = lowerCAmelCase__
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
368
""" Testing suite for the PyTorch UperNet model. """


import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    """Builds tiny configs and random inputs for the UperNet common tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        # A tiny ConvNext is used as backbone to keep the common tests fast.
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as UperNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
52
0
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class A_(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """A T5-style encoder stack over token embeddings with fixed (non-trainable)
    learned position embeddings; returns the encoded sequence and its mask."""

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        # position table is kept frozen during training
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode ``encoder_input_tokens`` (token-id tensor of shape
        (batch, seq) — presumably; confirm against caller) and return
        ``(encoded, encoder_inputs_mask)``."""
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
273
from __future__ import annotations from collections import namedtuple def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple: '''simple docstring''' UpperCAmelCase = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
273
1
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class SCREAMING_SNAKE_CASE__ (__snake_case , unittest.TestCase ): __lowerCamelCase : int = WavaVecaPhonemeCTCTokenizer __lowerCamelCase : Tuple = False def snake_case_ ( self): super().setUp() lowercase__ : Optional[int] = ( '<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ' 'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ' 'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ' 'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ' 'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ' 'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ' 'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ' 'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ' 'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ' 'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. 
oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ' 'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ' 'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ' 'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4' ).split(' ') lowercase__ : str = dict(zip(a , range(len(a)))) lowercase__ : Union[str, Any] = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'} lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(a) + '\n') def snake_case_ ( self , a , a=False , a=20 , a=5): lowercase__ : int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=a)) for i in range(len(a))] lowercase__ : List[Any] = list(filter(lambda a: [t[0]] == tokenizer.encode(t[1] , do_phonemize=a) , a)) if max_length is not None and len(a) > max_length: lowercase__ : str = toks[:max_length] if min_length is not None and len(a) < min_length and len(a) > 0: while len(a) < min_length: lowercase__ : Dict = toks + toks # toks_str = [t[1] for t in toks] lowercase__ : Optional[int] = [t[0] for t in toks] # Ensure consistency lowercase__ : str = tokenizer.decode(a , clean_up_tokenization_spaces=a) if " " not in output_txt and len(a) > 1: lowercase__ : Dict = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a) ) if with_prefix_space: lowercase__ : str = ' ' + output_txt lowercase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a) return output_txt, output_ids def snake_case_ ( self , **a): kwargs.update(self.special_tokens_map) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **a) def snake_case_ ( self): lowercase__ : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft') # check adding a single 
token tokenizer.add_tokens('xxx') lowercase__ : List[Any] = tokenizer('m xxx ɪ' , do_phonemize=a).input_ids self.assertEqual(a , [13, 392, 17]) # xxx should be last token tokenizer.add_tokens(['aaa', 'bbb', 'ccc']) lowercase__ : List[str] = tokenizer('m aaa ɪ ccc' , do_phonemize=a).input_ids self.assertEqual(a , [13, 393, 17, 395]) # aaa and ccc should be after xxx and 2 after aaa lowercase__ : List[Any] = tokenizer('maɪ c' , do_phonemize=a).input_ids self.assertEqual(a , [3, 200]) # mai should be <unk> (=3) def snake_case_ ( self): lowercase__ : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft') lowercase__ : Tuple = 'Hello how are you' lowercase__ : int = tokenizer.phonemize(a , phonemizer_lang='en-us') self.assertEqual(a , 'h ə l oʊ h aʊ ɑːɹ j uː') def snake_case_ ( self): lowercase__ : Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft') lowercase__ : Union[str, Any] = 'Hello how are you' lowercase__ : List[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us') self.assertEqual(tokenizer(a).input_ids , tokenizer(a , do_phonemize=a).input_ids) def snake_case_ ( self): lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft') lowercase__ : Optional[int] = 'Hello how are you' lowercase__ : Tuple = tokenizer.phonemize(a , phonemizer_lang='en-us') lowercase__ : Dict = tokenizer.decode(tokenizer(a).input_ids) self.assertEqual(a , a) def snake_case_ ( self): lowercase__ : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft') lowercase__ : str = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] lowercase__ : int = tokenizer.decode(sample_ids[0]) lowercase__ : Dict = tokenizer.batch_decode(a) self.assertEqual(a , batch_tokens[0]) self.assertEqual(a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ']) def snake_case_ ( self): lowercase__ : List[str] = self.tokenizer_class.from_pretrained( 
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|') tokenizer.add_tokens('|') lowercase__ : Any = 'Hello how are you' lowercase__ : List[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us') self.assertEqual(a , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |') def snake_case_ ( self): lowercase__ : List[Any] = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|') tokenizer.add_tokens('|') lowercase__ : Any = 'Hello how are you' lowercase__ : Optional[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us') self.assertEqual(tokenizer(a).input_ids , tokenizer(a , do_phonemize=a).input_ids) def snake_case_ ( self): lowercase__ : int = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|') tokenizer.add_tokens('|') # fmt: off lowercase__ : List[str] = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter lowercase__ : Dict = tokenizer.decode(sample_ids[0]) lowercase__ : List[str] = tokenizer.batch_decode(a) self.assertEqual(a , batch_tokens[0]) self.assertEqual(a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ']) # decode with no word_del_token filter lowercase__ : Optional[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=a) lowercase__ : List[Any] = tokenizer.batch_decode(a , filter_word_delimiter_token=a) self.assertEqual(a , batch_tokens[0]) self.assertEqual(a , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ']) def snake_case_ ( self): lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|') tokenizer.add_tokens('|') lowercase__ : List[Any] = 'Hello how are you' lowercase__ : List[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us') lowercase__ : Dict = 
tokenizer.decode(tokenizer(a).input_ids , filter_word_delimiter_token=a) self.assertEqual(a , a) def snake_case_ ( self): lowercase__ : List[str] = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|') tokenizer.add_tokens('|') lowercase__ : Optional[int] = 'Hello how are you' lowercase__ : int = tokenizer.phonemize(a , phonemizer_lang='en-us') lowercase__ : List[str] = tokenizer.decode(tokenizer(a).input_ids , filter_word_delimiter_token=a) self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |')]).strip() , a) def snake_case_ ( self): lowercase__ : Any = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=a) lowercase__ : Tuple = 'Hello how are you' lowercase__ : Optional[Any] = tokenizer(a , phonemizer_lang='en-us').input_ids lowercase__ : Any = tokenizer(a , phonemizer_lang='fr-fr').input_ids self.assertNotEqual(a , a) lowercase__ : Optional[Any] = tokenizer.decode(a) lowercase__ : Optional[Any] = tokenizer.decode(a) self.assertEqual(a , 'h ə l oʊ h aʊ ɑːɹ j uː') self.assertEqual(a , 'ɛ l o h aʊ a ʁ j u') def snake_case_ ( self): lowercase__ : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft') lowercase__ : Optional[Any] = 'Hello how Are you' lowercase__ : List[Any] = 'hello how are you' lowercase__ : int = tokenizer(a).input_ids lowercase__ : Optional[int] = tokenizer(a).input_ids self.assertEqual(a , a) def snake_case_ ( self): lowercase__ : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft') tokenizer.add_tokens(['!', '?']) tokenizer.add_special_tokens({'cls_token': '$$$'}) # fmt: off lowercase__ : Dict = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on lowercase__ : Union[str, Any] = tokenizer.batch_decode(a) self.assertEqual(a , ['k s ɾ ɾ l ɭʲ!?!? 
$$$', 'j ð s j ð s oːɹ $$$']) @staticmethod def snake_case_ ( a , a): lowercase__ : Tuple = [d[key] for d in offsets] return retrieved_list def snake_case_ ( self): lowercase__ : str = self.get_tokenizer(word_delimiter_token='|') tokenizer.add_tokens('|') # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" lowercase__ : Dict = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on lowercase__ : Tuple = tokenizer.decode(a , output_char_offsets=a , filter_word_delimiter_token=a) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys()) , 2) self.assertTrue('text' in outputs) self.assertTrue('char_offsets' in outputs) self.assertTrue(isinstance(a , a)) # check that order of chars is correct and identical for both outputs self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char')) , outputs.text) self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'char') , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ']) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'start_offset') , [0, 1, 4, 7, 9, 11, 12, 15, 16]) self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'end_offset') , [1, 4, 6, 9, 10, 12, 15, 16, 17]) def snake_case_ ( self): lowercase__ : Dict = self.get_tokenizer(word_delimiter_token='|') def check_list_tuples_equal(a , a): self.assertTrue(isinstance(a , a)) self.assertTrue(isinstance(outputs_list[0] , a)) # transform list to ModelOutput lowercase__ : Union[str, Any] = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in 
outputs_list[0]}) self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text']) def recursive_check(a , a): if isinstance(a , a): [recursive_check(a , a) for la, la in zip(a , a)] self.assertEqual(a , a) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets']) # fmt: off lowercase__ : List[str] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char lowercase__ : str = tokenizer.batch_decode(a , output_char_offsets=a) lowercase__ : Tuple = [tokenizer.decode(a , output_char_offsets=a) for ids in sample_ids] check_list_tuples_equal(a , a) @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes') def snake_case_ ( self): pass @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes') def snake_case_ ( self): pass @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency') def snake_case_ ( self): pass @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing') def snake_case_ ( self): pass def snake_case_ ( self): lowercase__ : List[Any] = self.get_tokenizers(do_lower_case=a) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}"""): lowercase__ : Any = tokenizer.vocab_size lowercase__ : Tuple = len(a) self.assertNotEqual(a , 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) lowercase__ : Optional[int] = ['aaaaa 
bbbbbb', 'cccccccccdddddddd'] lowercase__ : Dict = tokenizer.add_tokens(a) lowercase__ : int = tokenizer.vocab_size lowercase__ : Tuple = len(a) self.assertNotEqual(a , 0) self.assertEqual(a , a) self.assertEqual(a , len(a)) self.assertEqual(a , all_size + len(a)) lowercase__ : Dict = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=a) self.assertGreaterEqual(len(a) , 4) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) lowercase__ : str = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} lowercase__ : Tuple = tokenizer.add_special_tokens(a) lowercase__ : Dict = tokenizer.vocab_size lowercase__ : int = len(a) self.assertNotEqual(a , 0) self.assertEqual(a , a) self.assertEqual(a , len(a)) self.assertEqual(a , all_size_a + len(a)) lowercase__ : List[str] = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=a) self.assertGreaterEqual(len(a) , 6) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[0] , tokens[1]) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokens[-4]) self.assertEqual(tokens[0] , tokenizer.eos_token_id) self.assertEqual(tokens[-3] , tokenizer.pad_token_id) @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.') def snake_case_ ( self): pass @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.') def snake_case_ ( self): pass def snake_case_ ( self): # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. 
lowercase__ : List[str] = self.get_tokenizers(fast=a , do_lower_case=a) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}"""): lowercase__ : Dict = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't'] lowercase__ : str = tokenizer.convert_tokens_to_string(a) self.assertIsInstance(output['text'] , a)
216
# Script: migrate an old-format diffusers UNet repository in place (config
# key renames + state-dict key renames), controlled by three boolean flags.
#
# NOTE(review): this file has been through an automated identifier-mangling
# pass — every assignment target was rewritten to `snake_case_`, while the
# *uses* still refer to the original names (`parser`, `args`, `config`,
# `model`, ...), so the script is not runnable as-is.  The comments below
# describe the intended flow; restore the original names before running.
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNetaDConditionModel, UNetaDModel

# Intended flags (in order): do_only_config, do_only_weights, do_only_renaming.
snake_case_ = False
snake_case_ = True
snake_case_ = False

if __name__ == "__main__":
    snake_case_ = argparse.ArgumentParser()
    parser.add_argument(
        '''--repo_path''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    snake_case_ = parser.parse_args()

    # Legacy config key -> new config key (intended: config_parameters_to_change).
    snake_case_ = {
        '''image_size''': '''sample_size''',
        '''num_res_blocks''': '''layers_per_block''',
        '''block_channels''': '''block_out_channels''',
        '''down_blocks''': '''down_block_types''',
        '''up_blocks''': '''up_block_types''',
        '''downscale_freq_shift''': '''freq_shift''',
        '''resnet_num_groups''': '''norm_num_groups''',
        '''resnet_act_fn''': '''act_fn''',
        '''resnet_eps''': '''norm_eps''',
        '''num_head_channels''': '''attention_head_dim''',
    }

    # Legacy state-dict key prefix -> new submodule name (intended: key_parameters_to_change).
    snake_case_ = {
        '''time_steps''': '''time_proj''',
        '''mid''': '''mid_block''',
        '''downsample_blocks''': '''down_blocks''',
        '''upsample_blocks''': '''up_blocks''',
    }

    # Config lives at the repo root for plain UNets, under `unet/` otherwise.
    snake_case_ = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''

    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        snake_case_ = reader.read()
        snake_case_ = json.loads(text)

    if do_only_config:
        # Drop every legacy key so the new model class only receives valid kwargs.
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, '''config.json'''):
        snake_case_ = UNetaDModel(**config)
    else:
        # The LDM text2im checkpoint uses the conditional UNet variant.
        snake_case_ = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
        snake_case_ = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    snake_case_ = dict(model.config)

    if do_only_renaming:
        # Move values from legacy key names to the new ones, then strip the
        # legacy "UNetRes" prefix from the block-type class names.
        for key, value in config_parameters_to_change.items():
            if key in config:
                snake_case_ = config[key]
                del config[key]
        snake_case_ = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
        snake_case_ = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]

    if do_only_weights:
        snake_case_ = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        snake_case_ = {}
        for param_key, param_value in state_dict.items():
            # NOTE(review): '.op.' keys are deliberately dropped here —
            # presumably re-created by the new model on load; confirm.
            if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
                continue
            snake_case_ = False
            for key, new_key in key_parameters_to_change.items():
                # Only the leading dotted path component is renamed.
                if not has_changed and param_key.split('''.''')[0] == key:
                    snake_case_ = param_value
                    snake_case_ = True
            if not has_changed:
                snake_case_ = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
216
1
'''Flax DDPM scheduler (denoising diffusion probabilistic models).'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
#
# NOTE(review): this file has been through an automated identifier-mangling
# pass — all three classes are named `A`, locals are assigned as
# `__UpperCamelCase` but read under their original names, and
# `SCREAMING_SNAKE_CASE__` / `Dict` / `DDPMSchedulerState` /
# `FlaxDDPMSchedulerOutput` are unresolved in this file.  Comments below
# describe the intended flow (DDPM, Ho et al. 2020, arXiv:2006.11239).
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class A:
    '''Immutable scheduler state pytree (intended: DDPMSchedulerState).'''

    # Intended fields (in order): common, init_noise_sigma, timesteps,
    # num_inference_steps — mangled to `A = 42` placeholders below.
    A = 42

    # setable values
    A = 42
    A = 42
    A = None

    @classmethod
    def a_(cls, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase) -> Optional[int]:
        # Factory: bundle the shared schedule tables, the initial noise sigma
        # and the (reversed) timestep schedule into one state object.
        return cls(common=_UpperCAmelCase, init_noise_sigma=_UpperCAmelCase, timesteps=_UpperCAmelCase)


@dataclass
class A(SCREAMING_SNAKE_CASE__):
    '''Step output wrapper (intended: FlaxDDPMSchedulerOutput).'''

    A = 42


class A(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__):
    '''DDPM scheduler: ancestral sampling with the DDPM posterior.'''

    # Names of compatible Karras-style schedulers, plus the dtype field.
    A = [e.name for e in FlaxKarrasDiffusionSchedulers]
    A = 42

    @property
    def a_(self) -> Dict:
        # has_state: this scheduler carries its mutable data in a separate
        # state pytree, so always True.
        return True

    @register_to_config
    def __init__(self, _UpperCAmelCase=1_0_0_0, _UpperCAmelCase=0.0_001, _UpperCAmelCase=0.02, _UpperCAmelCase="linear", _UpperCAmelCase=None, _UpperCAmelCase="fixed_small", _UpperCAmelCase=True, _UpperCAmelCase="epsilon", _UpperCAmelCase=jnp.floataa, ) -> Tuple:
        # Intended parameters (by position): num_train_timesteps, beta_start,
        # beta_end, beta_schedule, trained_betas, variance_type, clip_sample,
        # prediction_type, dtype — all captured via @register_to_config.
        __UpperCamelCase : Optional[int] = dtype

    def a_(self, _UpperCAmelCase=None) -> DDPMSchedulerState:
        # create_state: build the initial scheduler state.
        if common is None:
            __UpperCamelCase : Any = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        __UpperCamelCase : Tuple = jnp.array(1.0, dtype=self.dtype)

        # Training timesteps in reverse order (T-1 .. 0).
        __UpperCamelCase : str = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=_UpperCAmelCase,
            init_noise_sigma=_UpperCAmelCase,
            timesteps=_UpperCAmelCase,
        )

    def a_(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None) -> jnp.ndarray:
        # scale_model_input: DDPM needs no input scaling — identity.
        return sample

    def a_(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=()) -> DDPMSchedulerState:
        # set_timesteps: pick `num_inference_steps` evenly spaced timesteps
        # out of the training schedule, descending.
        __UpperCamelCase : Optional[int] = self.config.num_train_timesteps // num_inference_steps

        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        __UpperCamelCase : int = (jnp.arange(0, _UpperCAmelCase) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=_UpperCAmelCase,
            timesteps=_UpperCAmelCase,
        )

    def a_(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None) -> Any:
        # _get_variance: posterior variance at timestep t, per the configured
        # (or overriding) variance_type.
        __UpperCamelCase : Dict = state.common.alphas_cumprod[t]
        __UpperCamelCase : Optional[Any] = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        __UpperCamelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            __UpperCamelCase : Union[str, Any] = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            # Floor the variance to keep the later sqrt/log numerically safe.
            __UpperCamelCase : str = jnp.clip(_UpperCAmelCase, a_min=1E-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            __UpperCamelCase : List[str] = jnp.log(jnp.clip(_UpperCAmelCase, a_min=1E-20))
        elif variance_type == "fixed_large":
            __UpperCamelCase : Optional[int] = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            __UpperCamelCase : Dict = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate in log space between min and max variance using the
            # model-predicted fraction (mapped from [-1, 1] to [0, 1]).
            __UpperCamelCase : Tuple = variance
            __UpperCamelCase : str = state.common.betas[t]
            __UpperCamelCase : Dict = (predicted_variance + 1) / 2
            __UpperCamelCase : Any = frac * max_log + (1 - frac) * min_log

        return variance

    def a_(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=True, ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        # step: predict x_{t-1} from the model output and current sample x_t.
        __UpperCamelCase : Dict = timestep

        if key is None:
            # NOTE(review): fixed PRNGKey(0) fallback — sampling is then
            # deterministic unless the caller supplies a key.
            __UpperCamelCase : Tuple = jax.random.PRNGKey(0)

        # Models trained with learned variance emit 2x channels: split into
        # (mean prediction, variance prediction).
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            __UpperCamelCase, __UpperCamelCase : Dict = jnp.split(_UpperCAmelCase, sample.shape[1], axis=1)
        else:
            __UpperCamelCase : List[Any] = None

        # 1. compute alphas, betas
        __UpperCamelCase : Tuple = state.common.alphas_cumprod[t]
        __UpperCamelCase : Tuple = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        __UpperCamelCase : Optional[Any] = 1 - alpha_prod_t
        __UpperCamelCase : List[Any] = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            __UpperCamelCase : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            __UpperCamelCase : List[str] = model_output
        elif self.config.prediction_type == "v_prediction":
            __UpperCamelCase : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            __UpperCamelCase : Tuple = jnp.clip(_UpperCAmelCase, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        __UpperCamelCase : int = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        __UpperCamelCase : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        __UpperCamelCase : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            # Sample sigma_t * z with a fresh subkey.
            __UpperCamelCase : int = jax.random.split(_UpperCAmelCase, num=1)
            __UpperCamelCase : int = jax.random.normal(_UpperCAmelCase, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(_UpperCAmelCase, _UpperCAmelCase, predicted_variance=_UpperCAmelCase) ** 0.5) * noise

        # No noise is added at the final step (t == 0).
        __UpperCamelCase : Tuple = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        __UpperCamelCase : Tuple = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=_UpperCAmelCase, state=_UpperCAmelCase)

    def a_(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ) -> jnp.ndarray:
        # add_noise: forward-diffuse clean samples to the given timesteps.
        return add_noise_common(state.common, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)

    def a_(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ) -> jnp.ndarray:
        # get_velocity: v-prediction training target.
        return get_velocity_common(state.common, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)

    def __len__(self) -> List[Any]:
        # Length = number of training timesteps.
        return self.config.num_train_timesteps
298
'''OwlViT processor: wraps a CLIP tokenizer and an OwlViT image processor.'''
# NOTE(review): this file has been through an automated identifier-mangling
# pass — the class is named `A`, its base `SCREAMING_SNAKE_CASE__` is
# unresolved, and locals are assigned as `__UpperCamelCase` but read under
# their original names (`kwargs`, `encodings`, `encoding`, ...).  Comments
# below describe the intended flow; restore the names before running.
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class A(SCREAMING_SNAKE_CASE__):
    '''Combines OwlViTImageProcessor and a CLIP tokenizer into one processor.'''

    # ProcessorMixin attributes: component names and their expected classes.
    A = ["image_processor", "tokenizer"]
    A = "OwlViTImageProcessor"
    A = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, _UpperCAmelCase=None, _UpperCAmelCase=None, **_UpperCAmelCase) -> str:
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, with a deprecation warning.
        __UpperCamelCase : Tuple = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                _UpperCAmelCase,
            )
            __UpperCamelCase : str = kwargs.pop("feature_extractor")

        __UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(_UpperCAmelCase, _UpperCAmelCase)

    def __call__(self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase="max_length", _UpperCAmelCase="np", **_UpperCAmelCase) -> str:
        # Intended parameters (by position): text, query_images, images,
        # padding, return_tensors.  At least one of the three inputs is required.
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            # A single string or a flat list of strings -> one encoding batch.
            if isinstance(_UpperCAmelCase, _UpperCAmelCase) or (isinstance(_UpperCAmelCase, _UpperCAmelCase) and not isinstance(text[0], _UpperCAmelCase)):
                __UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase)]

            # A nested list of strings: one list of queries per image.
            elif isinstance(_UpperCAmelCase, _UpperCAmelCase) and isinstance(text[0], _UpperCAmelCase):
                __UpperCamelCase : List[str] = []

                # Maximum number of queries across batch
                __UpperCamelCase : List[str] = max([len(_UpperCAmelCase) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(_UpperCAmelCase) != max_num_queries:
                        __UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase))

                    __UpperCamelCase : int = self.tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase)
                    encodings.append(_UpperCAmelCase)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Concatenate the per-sample encodings along the batch axis in the
            # requested tensor framework.
            if return_tensors == "np":
                __UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                __UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                __UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                __UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                __UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                __UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                __UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                __UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            __UpperCamelCase : Optional[Any] = BatchEncoding()
            __UpperCamelCase : Union[str, Any] = input_ids
            __UpperCamelCase : List[str] = attention_mask

        if query_images is not None:
            # Image-guided detection: encode the query image(s).
            __UpperCamelCase : str = BatchEncoding()
            __UpperCamelCase : Any = self.image_processor(
                _UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase).pixel_values
            __UpperCamelCase : List[Any] = query_pixel_values

        if images is not None:
            __UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase)

        # Merge image pixel values into the text/query encoding when both given.
        if text is not None and images is not None:
            __UpperCamelCase : Optional[Any] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            __UpperCamelCase : Union[str, Any] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_UpperCAmelCase), tensor_type=_UpperCAmelCase)

    def a_(self, *_UpperCAmelCase, **_UpperCAmelCase) -> Optional[int]:
        # Forward to the image processor's post_process.
        return self.image_processor.post_process(*_UpperCAmelCase, **_UpperCAmelCase)

    def a_(self, *_UpperCAmelCase, **_UpperCAmelCase) -> List[str]:
        # Forward to the image processor's post_process_object_detection.
        return self.image_processor.post_process_object_detection(*_UpperCAmelCase, **_UpperCAmelCase)

    def a_(self, *_UpperCAmelCase, **_UpperCAmelCase) -> Optional[int]:
        # Forward to the image processor's post_process_image_guided_detection.
        return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase, **_UpperCAmelCase)

    def a_(self, *_UpperCAmelCase, **_UpperCAmelCase) -> Union[str, Any]:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase)

    def a_(self, *_UpperCAmelCase, **_UpperCAmelCase) -> int:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase)

    @property
    def a_(self) -> Tuple:
        # Deprecated alias for image_processor_class.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            _UpperCAmelCase,
        )
        return self.image_processor_class

    @property
    def a_(self) -> Union[str, Any]:
        # Deprecated alias for image_processor.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            _UpperCAmelCase,
        )
        return self.image_processor
298
1
# Test suite for BigBirdTokenizer / BigBirdTokenizerFast.
# NOTE(review): this file has been through an automated identifier-mangling
# pass — locals are assigned as `__UpperCamelCase` but read under their
# original names (`tokenizer`, `tokens`, `ids`, ...), constants as `A_`
# but read as `SPIECE_UNDERLINE` / `SAMPLE_VOCAB`, and all test methods are
# renamed `__lowercase`.  Comments note the intended behavior.
import unittest

from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin

# Intended names: SPIECE_UNDERLINE and SAMPLE_VOCAB.
A_ :Dict = '''▁'''
A_ :Optional[int] = get_tests_dir('''fixtures/test_sentencepiece.model''')


@require_sentencepiece
@require_tokenizers
class __A(__snake_case, unittest.TestCase):
    """BigBird tokenizer tests (slow + fast) over a small fixture vocab."""

    UpperCamelCase__ : Tuple = BigBirdTokenizer
    UpperCamelCase__ : Dict = BigBirdTokenizerFast
    UpperCamelCase__ : Union[str, Any] = True
    UpperCamelCase__ : Optional[Any] = True

    def __lowercase(self):
        """setUp: build a tokenizer from the fixture vocab and save it."""
        super().setUp()

        __UpperCamelCase : int = self.tokenizer_class(UpperCamelCase__, keep_accents=UpperCamelCase__)
        tokenizer.save_pretrained(self.tmpdirname)

    def __lowercase(self):
        """Token <-> id conversion for "<s>" (id 1 in the fixture vocab)."""
        __UpperCamelCase : Union[str, Any] = "<s>"
        __UpperCamelCase : Optional[int] = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__), UpperCamelCase__)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__), UpperCamelCase__)

    def __lowercase(self):
        """Vocabulary layout: first/last keys and total size (1004)."""
        __UpperCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '[MASK]')
        self.assertEqual(len(UpperCamelCase__), 1004)

    def __lowercase(self):
        """Base vocab size (without added special tokens) is 1000."""
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def __lowercase(self):
        """Slow and fast tokenizers agree on tokenize/encode."""
        if not self.test_rust_tokenizer:
            return

        __UpperCamelCase : Union[str, Any] = self.get_tokenizer()
        __UpperCamelCase : int = self.get_rust_tokenizer()

        __UpperCamelCase : Union[str, Any] = "I was born in 92000, and this is falsé."

        __UpperCamelCase : int = tokenizer.tokenize(UpperCamelCase__)
        __UpperCamelCase : Any = rust_tokenizer.tokenize(UpperCamelCase__)
        self.assertListEqual(UpperCamelCase__, UpperCamelCase__)

        __UpperCamelCase : Optional[int] = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__)
        __UpperCamelCase : Optional[Any] = rust_tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__)
        self.assertListEqual(UpperCamelCase__, UpperCamelCase__)

        __UpperCamelCase : Optional[int] = self.get_rust_tokenizer()
        __UpperCamelCase : Tuple = tokenizer.encode(UpperCamelCase__)
        __UpperCamelCase : List[str] = rust_tokenizer.encode(UpperCamelCase__)
        self.assertListEqual(UpperCamelCase__, UpperCamelCase__)

    def __lowercase(self):
        """Full tokenizer round-trip: tokenize, ids, and back (accents -> <unk>)."""
        __UpperCamelCase : Optional[int] = BigBirdTokenizer(UpperCamelCase__, keep_accents=UpperCamelCase__)

        __UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
        self.assertListEqual(UpperCamelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__),
            [285, 46, 10, 170, 382],
        )

        __UpperCamelCase : str = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            UpperCamelCase__,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        __UpperCamelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase__)
        self.assertListEqual(
            UpperCamelCase__,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        __UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(UpperCamelCase__)
        # Out-of-vocab pieces ('9', 'é') come back as '<unk>'.
        self.assertListEqual(
            UpperCamelCase__,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],
        )

    @cached_property
    def __lowercase(self):
        """The full pretrained tokenizer used by the @slow tests below."""
        return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')

    @slow
    def __lowercase(self):
        """Known-good encoding of a short string with the pretrained vocab."""
        __UpperCamelCase : List[str] = "Hello World!"
        __UpperCamelCase : Dict = [65, 18536, 2260, 101, 66]

        self.assertListEqual(UpperCamelCase__, self.big_tokenizer.encode(UpperCamelCase__))

    @slow
    def __lowercase(self):
        """Known-good encoding of a long string with odd characters and OOV words."""
        __UpperCamelCase : Optional[Any] = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        __UpperCamelCase : Dict = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(UpperCamelCase__, self.big_tokenizer.encode(UpperCamelCase__))

    @require_torch
    @slow
    def __lowercase(self):
        """Smoke test: encoded batches feed a small BigBirdModel forward pass."""
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        __UpperCamelCase : List[Any] = list(self.big_tokenizer.get_vocab().keys())[:10]
        __UpperCamelCase : int = " ".join(UpperCamelCase__)
        __UpperCamelCase : Dict = self.big_tokenizer.encode_plus(UpperCamelCase__, return_tensors='pt', return_token_type_ids=UpperCamelCase__)
        __UpperCamelCase : int = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=UpperCamelCase__)

        __UpperCamelCase : Dict = BigBirdConfig(attention_type='original_full')
        __UpperCamelCase : Optional[int] = BigBirdModel(UpperCamelCase__)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**UpperCamelCase__)
            model(**UpperCamelCase__)

    @slow
    def __lowercase(self):
        """Decoding keeps special tokens and drops the space before [MASK]."""
        __UpperCamelCase : List[str] = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
        __UpperCamelCase : Optional[int] = tokenizer.decode(tokenizer('Paris is the [MASK].').input_ids)

        self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]')

    @slow
    def __lowercase(self):
        """Pinned integration encoding against a fixed model revision."""
        # fmt: off
        __UpperCamelCase : Optional[Any] = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__,
            model_name='google/bigbird-roberta-base',
            revision='215c99f1600e06f83acce68422f2035b2b5c3510',
        )
370
"""Mask2Former model configuration.

Reconstructed from an identifier-mangled original. Fixes: duplicate keyword
parameter names in ``__init__`` (a SyntaxError), attribute assignments that
bound throwaway locals instead of ``self``, the module-level ``logger`` being
clobbered by a second assignment to the same name, and the two methods that
shared one name. The public class name ``__A`` is kept so external callers
are unaffected.
"""
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class __A(PretrainedConfig):
    """Configuration for Mask2Former models.

    Holds the decoder/pixel-decoder hyper-parameters plus a nested backbone
    configuration.  With all defaults, the configuration is similar to
    ``facebook/mask2former-swin-small-coco-instance``.
    """

    model_type = "mask2former"
    # Only Swin backbones are known-compatible; anything else merely warns.
    backbones_supported = ["swin"]
    # PretrainedConfig machinery maps the generic name onto our attribute.
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        # Allow a plain dict (e.g. deserialized JSON) in place of a config object.
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f'Supported model types: {",".join(self.backbones_supported)}'
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror the decoder depth under the generic PretrainedConfig name.
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2Former config from a pre-trained backbone config.

        Args:
            backbone_config: the backbone's configuration object.

        Returns:
            A new config instance wrapping ``backbone_config``.
        """
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a plain dict, expanding the nested
        backbone config so the result round-trips through JSON."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
245
0
"""Tests for the `text-question-answering` tool (local and remote variants).

Reconstructed from an identifier-mangled original: the four test methods all
shared one name (so only the last was ever collected), ``self.tool`` /
``self.remote_tool`` were never assigned (results were bound to throwaway
locals), and the class inherited from an undefined name instead of
``ToolTesterMixin``.
"""
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


# Reference passage every test queries the tool against.
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class UpperCamelCase__(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        # Build both the local tool and its remote (inference-endpoint) twin.
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        # Positional-argument call path of the local tool.
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        # Same query, served by the remote tool.
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        # Keyword-argument call path of the local tool.
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
323
"""Image-to-text pipeline.

Reconstructed from an identifier-mangled original: the decorator argument and
base class were the undefined name ``lowercase_`` (should be
``PIPELINE_INIT_ARGS`` / ``Pipeline``), ``_sanitize_parameters`` filled
throwaway locals instead of the parameter dicts it returns, and the pipeline
hook methods (``preprocess``/``_forward``/``postprocess``) had lost their
contract-mandated names.
"""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class UpperCamelCase__(Pipeline):
    """Predict a caption for a given image, optionally conditioned on a text prompt."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split user kwargs into (preprocess, forward, postprocess) parameter dicts."""
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Caption the image(s) passed as input; see the pipeline docs for accepted formats."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                # GIT expects [CLS] + prompt tokens without the usual special tokens.
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt` is None). In batch model,
        # the pipeline will group them into a list of `None`, which fail `generate`. Change them back to `None`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        """Decode generated token ids into `{"generated_text": ...}` records."""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
323
1
"""Thin wrapper around an `onnxruntime.InferenceSession` with HF-Hub save/load.

Reconstructed from an identifier-mangled original: the module ``logger`` and the
ORT→numpy dtype table clobbered each other (both bound to one name), every
method was named ``A`` (so only the last survived), ``__call__`` converted the
wrong variable to ``np.array``, the class body referenced
``OnnxRuntimeModel`` while the class itself had a different name, and the
numpy dtype identifiers were mangled (``np.inta`` etc.).
"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging

if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# Map ONNX Runtime tensor element types to their numpy equivalents.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        """Wrap an already-created `ort.InferenceSession`.

        Recognized kwargs: `model_save_dir` (where the weights live on disk)
        and `latest_model_name` (the weight file's name inside that dir).
        """
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run inference; every keyword argument becomes a named model input."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # First argument None = return all model outputs.
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an `ort.InferenceSession` for `path`, defaulting to CPU execution."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the model file (and external weights, if any) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            # Saving into the directory we loaded from is a no-op, not an error.
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Public save entry point; creates the directory then delegates to `_save_pretrained`."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load from a local directory or download from the HF Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)

        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Public load entry point; supports the `repo@revision` id syntax."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
337
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def __a ( UpperCAmelCase ) ->List[str]: """simple docstring""" if isinstance(UpperCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class __UpperCAmelCase : '''simple docstring''' def A (self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ): pass def A (self : List[str] ): pass def A (self : Union[str, Any] ): pass def A (self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ): A = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase ) A = TFVisionTextDualEncoderModel(_lowerCAmelCase ) A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def A (self : Dict , _lowerCAmelCase : 
Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ): A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def A (self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None , **_lowerCAmelCase : List[Any] ): A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) A = {"""vision_model""": vision_model, """text_model""": text_model} A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase ) A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def A (self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Any ): A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) A = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ) A = 
TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) A = after_output[0].numpy() A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1e-5 ) def A (self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[Any] ): A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) A = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) A = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) A = to_atuple(vision_model.config.image_size ) A = to_atuple(vision_model.config.patch_size ) A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) A = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) A = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def A (self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ): A = np.abs((a - b) ).max() self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" ) def A (self : List[str] ): A = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_lowerCAmelCase ) def A (self : Optional[int] ): A = self.prepare_config_and_inputs() 
self.check_model_from_pretrained_configs(**_lowerCAmelCase ) def A (self : List[Any] ): A = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase ) def A (self : int ): A = self.prepare_config_and_inputs() self.check_save_load(**_lowerCAmelCase ) def A (self : int ): A = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_lowerCAmelCase ) @slow def A (self : Tuple ): A , A = self.get_pretrained_model_and_inputs() A = model_a(**_lowerCAmelCase ) A = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_lowerCAmelCase ) A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) A = model_a(**_lowerCAmelCase ) A = after_outputs[0].numpy() A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1e-5 ) @require_tf class __UpperCAmelCase ( A__ , unittest.TestCase ): '''simple docstring''' def A (self : int ): A = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) A = 13 A = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) A = random_attention_mask([batch_size, 4] ) A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int ): A = TFViTModel(_lowerCAmelCase , name="""vision_model""" ) A = TFBertModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def A (self : Union[str, Any] ): A = TFViTModelTester(self ) A = TFBertModelTester(self ) A = vit_model_tester.prepare_config_and_inputs() A = bert_model_tester.prepare_config_and_inputs() A , A , A = vision_config_and_inputs ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A 
) , ( A ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __UpperCAmelCase ( A__ , unittest.TestCase ): '''simple docstring''' def A (self : Optional[int] ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. A = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) A = 13 A = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) A = random_attention_mask([batch_size, 4] ) A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : Any ): A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) A = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) A = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) A = to_atuple(vision_model.config.image_size ) A = to_atuple(vision_model.config.patch_size ) A = (image_size[1] // 
patch_size[1]) * (image_size[0] // patch_size[0]) A = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) A = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : str ): A = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" ) A = TFRobertaModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def A (self : str ): A = TFDeiTModelTester(self ) A = TFRobertaModelTester(self ) A = vit_model_tester.prepare_config_and_inputs() A = bert_model_tester.prepare_config_and_inputs() A , A , A = vision_config_and_inputs ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __UpperCAmelCase ( A__ , unittest.TestCase ): '''simple docstring''' def A (self : Dict ): A = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) A = 13 A = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) A = random_attention_mask([batch_size, 4] ) A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ): 
A = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" ) A = TFBertModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def A (self : Optional[Any] ): A = TFCLIPVisionModelTester(self ) A = TFBertModelTester(self ) A = clip_model_tester.prepare_config_and_inputs() A = bert_model_tester.prepare_config_and_inputs() A , A = vision_config_and_inputs ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def A (self : Any ): A = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase ) A = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) A = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" ) A = model(**_lowerCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) A = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
337
1
'''simple docstring''' def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [1] for i in range(2, lowerCamelCase): factorials.append(factorials[-1] * i) assert 0 <= k < factorials[-1] * n, "k out of bounds" __lowerCAmelCase = [] __lowerCAmelCase = list(range(lowerCamelCase)) # Find permutation while factorials: __lowerCAmelCase = factorials.pop() __lowerCAmelCase , __lowerCAmelCase = divmod(lowerCamelCase, lowerCamelCase) permutation.append(elements[number]) elements.remove(elements[number]) permutation.append(elements[0]) return permutation if __name__ == "__main__": import doctest doctest.testmod()
174
"""Fast and slow tests for the Stable Diffusion XL img2img pipeline.

NOTE(review): the original chunk had every method renamed to the same
identifier (`_snake_case`) and every class attribute renamed to
`__UpperCamelCase`, so later definitions shadowed earlier ones and
`self.get_dummy_components()` / `self.get_dummy_inputs()` could never
resolve. Names below are restored to match the internal call sites.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny model components so the pipeline runs quickly on CPU."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic inputs (image in [0, 1], seeded generator) for a run."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        # Optional components are exercised elsewhere; intentionally a no-op here.
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        """Passing precomputed prompt embeddings must match passing raw prompts."""
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        # NOTE(review): this slow test loads "stabilityai/stable-diffusion-2-base",
        # which looks copied from another test file — confirm the checkpoint.
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
174
1
class RadixNode:
    """One node of a radix (compressed prefix) tree over strings."""

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Edges keyed by the first character of each child's prefix.
        self.nodes: dict = {}
        # A node is a leaf if the tree contains its word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        """Split ``word`` against this node's prefix.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list) -> None:
        """Insert every word of ``words`` into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert ``word`` into the subtree rooted at this node."""
        # Case 1: the word equals this node's prefix -> mark as leaf.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: no edge shares the word's first character
        # Solution: create an edge to a new leaf holding the whole word.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: the node prefix is fully matched
            # Solution: insert the remaining word below the child.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: prefixes diverge
            # Solution: split the edge with an intermediate node, then hang
            # the old child and (if any) the remaining word under it.
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True iff ``word`` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # Leftover prefix means the word diverges from the stored edge.
        if remaining_prefix != "":
            return False
        # Word fully consumed: it exists iff the child is a leaf.
        if remaining_word == "":
            return incoming_node.is_leaf
        # Otherwise keep searching below.
        return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove ``word`` from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # If there is remaining prefix, the word can't be in the tree.
        if remaining_prefix != "":
            return False
        # Word not yet consumed: recurse into the child.
        if remaining_word != "":
            return incoming_node.delete(remaining_word)
        # The child matches exactly but is not a stored word.
        if not incoming_node.is_leaf:
            return False
        # Child has no edges: drop it, then possibly merge ourselves
        # with our only remaining child to keep the tree compressed.
        if len(incoming_node.nodes) == 0:
            del self.nodes[word[0]]
            if len(self.nodes) == 1 and not self.is_leaf:
                merging_node = list(self.nodes.values())[0]
                self.is_leaf = merging_node.is_leaf
                self.prefix += merging_node.prefix
                self.nodes = merging_node.nodes
        # More than one edge below: just unmark the leaf flag.
        elif len(incoming_node.nodes) > 1:
            incoming_node.is_leaf = False
        # Exactly one edge below: merge the child into its only grandchild.
        else:
            merging_node = list(incoming_node.nodes.values())[0]
            incoming_node.is_leaf = merging_node.is_leaf
            incoming_node.prefix += merging_node.prefix
            incoming_node.nodes = merging_node.nodes
        return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree, one dash per depth level."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    """Smoke-test insert/find/delete; returns True when all checks pass."""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Build a demo tree and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
261
import copy
import re


class TrialShortNamer:
    """Encode hyperparameter dicts as short run names (and decode them back).

    Subclasses (or ``set_defaults``) provide ``PREFIX`` and ``DEFAULTS``;
    parameters equal to their default are omitted from the name.
    """

    PREFIX = "hp"
    DEFAULTS = {}
    # Cache built lazily by build_naming_info().
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and register) a unique short form for one word."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        # Shortest prefix not already taken by another word.
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: append a numeric suffix encoded as letters.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Return a unique short form for a full ``snake_case`` parameter name."""
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a
        # collision we have to fall back to a separated short name.
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode ``params`` into a name like ``hp_lr0.5_bs8``."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name.
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            # Numeric values are glued directly to the key, others use "-".
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a name produced by ``shortname`` back into a full param dict."""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        # Anything not mentioned in the name carries its default.
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
261
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Any = KandinskyVaaImgaImgPipeline __lowerCAmelCase : Any = ['image_embeds', 'negative_image_embeds', 'image'] __lowerCAmelCase : Optional[int] = [ 'image_embeds', 'negative_image_embeds', 'image', ] __lowerCAmelCase : Tuple = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __lowerCAmelCase : Optional[Any] = False @property def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' return self.time_input_dim @property def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' return 100 @property def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Optional[Any] = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", 
"""ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase : Optional[int] = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE ) return model @property def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Tuple = VQModel(**self.dummy_movq_kwargs ) return model def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Any = self.dummy_unet UpperCAmelCase : int = self.dummy_movq UpperCAmelCase : Any = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_0085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } UpperCAmelCase : int = DDIMScheduler(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> int: '''simple docstring''' UpperCAmelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , 
rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _SCREAMING_SNAKE_CASE ) # create init_image UpperCAmelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : str = Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((256, 256) ) if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ): UpperCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Tuple = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : Tuple = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Tuple = output.images UpperCAmelCase : Optional[int] = pipe( **self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0] UpperCAmelCase : str = image[0, -3:, -3:, -1] UpperCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase : List[str] = np.array( [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] ) assert ( 
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_img2img_frog.npy""" ) UpperCAmelCase : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) UpperCAmelCase : List[Any] = """A red cartoon frog, 4k""" UpperCAmelCase : List[str] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = KandinskyVaaImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) UpperCAmelCase : int = pipeline.to(_SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase , UpperCAmelCase : Any = pipe_prior( _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() UpperCAmelCase : List[Any] = pipeline( image=_SCREAMING_SNAKE_CASE , image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) UpperCAmelCase : Optional[int] = 
output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
109
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ : Optional[int] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : str = [ 'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'IBertForMaskedLM', 'IBertForMultipleChoice', 'IBertForQuestionAnswering', 'IBertForSequenceClassification', 'IBertForTokenClassification', 'IBertModel', 'IBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys A_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
192
0
"""Nightly integration tests for the ONNX Stable Diffusion inpaint pipeline.

NOTE(review): in the original chunk all methods shared one obfuscated name
and `options.enable_mem_pattern = False` had been reduced to a throwaway
assignment; names and attribute writes are restored below.
"""
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin

if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        # CUDA execution provider with a fixed 15 GB arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpaint_onnx_stable_diffusion(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpaint_onnx_stable_diffusion_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
370
from __future__ import annotations import time import numpy as np a__: Optional[Any] = [8, 5, 9, 7] a__: Dict = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] a__: List[Any] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class SCREAMING_SNAKE_CASE__ : def __init__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,): A__ = claim_vector A__ = allocated_resources_table A__ = maximum_claim_table def UpperCamelCase ( self ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def UpperCamelCase ( self ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def UpperCamelCase ( self ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def UpperCamelCase ( self ): return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def UpperCamelCase ( self,**__lowerCamelCase ): A__ = self.__need() A__ = self.__allocated_resources_table A__ = self.__available_resources() A__ = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 50 + '''\n''' ) while need_list: A__ = False for each_need in need_list: A__ = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: A__ = False break if execution: A__ = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: A__ = original_need_index print(f"Process {process_number + 1} is executing." 
) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack A__ = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def UpperCamelCase ( self ): print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + ''' '''.join(f"{it:>8}" for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + ''' '''.join(f"{it:>8}" for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
39
0
"""Registry of datasets' packaged (built-in) dataset-builder modules.

NOTE(review): in the original chunk the hashing helper was defined under an
obfuscated name while being called as `_hash_python_lines`, and the module
constants were all assigned to a throwaway `__a`; names restored below.
"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    """Return a sha256 hex digest of ``lines`` with comments/blanks stripped.

    Used to fingerprint a builder module's source for caching.
    """
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# Modules that can read per-file metadata (e.g. captions/labels) alongside data files.
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
35
"""simple docstring""" import math def _snake_case ( lowerCamelCase__ : list , lowerCamelCase__ : int ) -> int: lowerCamelCase_ : int =len(lowerCamelCase__ ) lowerCamelCase_ : List[Any] =int(math.floor(math.sqrt(lowerCamelCase__ ) ) ) lowerCamelCase_ : List[Any] =0 while arr[min(lowerCamelCase__ , lowerCamelCase__ ) - 1] < x: lowerCamelCase_ : str =step step += int(math.floor(math.sqrt(lowerCamelCase__ ) ) ) if prev >= n: return -1 while arr[prev] < x: lowerCamelCase_ : Dict =prev + 1 if prev == min(lowerCamelCase__ , lowerCamelCase__ ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": A__ : List[Any] = input('Enter numbers separated by a comma:\n').strip() A__ : Optional[Any] = [int(item) for item in user_input.split(',')] A__ : List[str] = int(input('Enter the number to be searched:\n')) A__ : Any = jump_search(arr, x) if res == -1: print('Number not found!') else: print(f'Number {x} is at index {res}')
144
0
"""Report per-job runtimes for a GitHub Actions workflow run, longest first."""
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from one job dict.

    Args:
        job: a job entry from the GitHub Actions "jobs" API payload; must
            contain ISO-8601 'started_at' and 'completed_at' fields.

    Returns:
        dict with 'started_at', 'completed_at' and rounded 'duration' in minutes.
    """
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Fetch time info for every job of a GitHub Actions workflow run.

    Args:
        workflow_run_id: numeric id of the workflow run.
        token: optional GitHub API token for authenticated (higher-rate) requests.

    Returns:
        Mapping of job name -> time-info dict; empty dict on any fetch error.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The API pages at 100 jobs; fetch the remaining pages (page 1 already read).
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        # Best-effort tool: report the error but do not crash the caller.
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # Sort jobs by duration, longest first, for the report.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
145
"""WikiSplit metric: combines SARI, exact match and SacreBLEU for text simplification."""
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'

_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'

_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized prediction equals the normalized gold answer, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Percentage of predictions that exactly match at least one of their references."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Keep/delete/add sub-scores of SARI for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references (counts are scaled by this).

    Returns:
        (keepscore, delscore_precision, addscore) tuple.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """SARI for a single (source, candidate, references) triple, averaged over 1-4 grams."""
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Normalize/tokenize a sentence the way sacrebleu does (needed for SARI inputs)."""
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu changed its tokenizer API between major versions 1 and 2.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    """Corpus-level SARI (scaled to 0-100) over aligned sources/predictions/references."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('Sources length must match predictions and references lengths.')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; references must all have the same count per prediction."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError('Sacrebleu requires the same number of references for each prediction')
    # sacrebleu expects references transposed: one list per reference index.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """Metric combining SARI, SacreBLEU and exact match for text simplification."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=[
                'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
                'https://github.com/cocoxu/simplification/blob/master/SARI.py',
                'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
                'https://github.com/mjpost/sacreBLEU',
            ],
            reference_urls=[
                'https://www.aclweb.org/anthology/Q16-1029.pdf',
                'https://github.com/mjpost/sacreBLEU',
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )

    def _compute(self, sources, predictions, references):
        # Each sub-metric is computed independently and merged into one result dict.
        result = {}
        result.update({'sari': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'exact': compute_em(predictions=predictions, references=references)})
        return result
145
1
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class A__ : def __init__( self , A_ = None ): '''simple docstring''' if components is None: UpperCamelCase : Any = [] UpperCamelCase : List[str] = list(A_ ) def __len__( self ): '''simple docstring''' return len(self.__components ) def __str__( self ): '''simple docstring''' return "(" + ",".join(map(A_ , self.__components ) ) + ")" def __add__( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = len(self ) if size == len(A_ ): UpperCamelCase : Optional[Any] = [self.__components[i] + other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: raise Exception("must have the same size" ) def __sub__( self , A_ ): '''simple docstring''' UpperCamelCase : Tuple = len(self ) if size == len(A_ ): UpperCamelCase : Tuple = [self.__components[i] - other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: # error case raise Exception("must have the same size" ) @overload def __mul__( self , A_ ): '''simple docstring''' ... @overload def __mul__( self , A_ ): '''simple docstring''' ... def __mul__( self , A_ ): '''simple docstring''' if isinstance(A_ , (float, int) ): UpperCamelCase : Optional[Any] = [c * other for c in self.__components] return Vector(A_ ) elif isinstance(A_ , A_ ) and len(self ) == len(A_ ): UpperCamelCase : Tuple = len(self ) UpperCamelCase : Optional[int] = [self.__components[i] * other.component(A_ ) for i in range(A_ )] return sum(A_ ) else: # error case raise Exception("invalid operand!" 
) def __UpperCamelCase( self ): '''simple docstring''' return Vector(self.__components ) def __UpperCamelCase( self , A_ ): '''simple docstring''' if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception("index out of range" ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' assert -len(self.__components ) <= pos < len(self.__components ) UpperCamelCase : Any = value def __UpperCamelCase( self ): '''simple docstring''' if len(self.__components ) == 0: raise Exception("Vector is empty" ) UpperCamelCase : Dict = [c**2 for c in self.__components] return math.sqrt(sum(A_ ) ) def __UpperCamelCase( self , A_ , A_ = False ): '''simple docstring''' UpperCamelCase : int = self * other UpperCamelCase : str = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def A_ ( _lowerCAmelCase ) -> Vector: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) return Vector([0] * dimension ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Vector: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (isinstance(_lowerCAmelCase , _lowerCAmelCase )) UpperCamelCase : Dict = [0] * dimension UpperCamelCase : str = 1 return Vector(_lowerCAmelCase ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Vector: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (isinstance(_lowerCAmelCase , (int, float) )) ) return x * scalar + y def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Vector: random.seed(_lowerCAmelCase ) UpperCamelCase : str = [random.randint(_lowerCAmelCase , _lowerCAmelCase ) for _ in range(_lowerCAmelCase )] return Vector(_lowerCAmelCase ) class A__ : def __init__( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = matrix UpperCamelCase : List[str] = w UpperCamelCase : Any = h def __str__( 
self ): '''simple docstring''' UpperCamelCase : Dict = "" for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , A_ ): '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): UpperCamelCase : Optional[Any] = [] for i in range(self.__height ): UpperCamelCase : Optional[int] = [ self.__matrix[i][j] + other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception("matrix must have the same dimension!" ) def __sub__( self , A_ ): '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): UpperCamelCase : List[Any] = [] for i in range(self.__height ): UpperCamelCase : Dict = [ self.__matrix[i][j] - other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception("matrices must have the same dimension!" ) @overload def __mul__( self , A_ ): '''simple docstring''' ... @overload def __mul__( self , A_ ): '''simple docstring''' ... def __mul__( self , A_ ): '''simple docstring''' if isinstance(A_ , A_ ): # matrix-vector if len(A_ ) == self.__width: UpperCamelCase : int = zero_vector(self.__height ) for i in range(self.__height ): UpperCamelCase : int = [ self.__matrix[i][j] * other.component(A_ ) for j in range(self.__width ) ] ans.change_component(A_ , sum(A_ ) ) return ans else: raise Exception( "vector must have the same size as the " "number of columns of the matrix!" 
) elif isinstance(A_ , (int, float) ): # matrix-scalar UpperCamelCase : Dict = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(A_ , self.__width , self.__height ) return None def __UpperCamelCase( self ): '''simple docstring''' return self.__height def __UpperCamelCase( self ): '''simple docstring''' return self.__width def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception("change_component: indices out of bounds" ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: UpperCamelCase : List[str] = value else: raise Exception("change_component: indices out of bounds" ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if self.__height != self.__width: raise Exception("Matrix is not square" ) UpperCamelCase : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(A_ ) ): UpperCamelCase : int = minor[i][:y] + minor[i][y + 1 :] return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant() def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if self.__height != self.__width: raise Exception("Matrix is not square" ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(A_ , A_ ) else: raise Exception("Indices out of bounds" ) def __UpperCamelCase( self ): '''simple docstring''' if self.__height != self.__width: raise Exception("Matrix is not square" ) if self.__height < 1: raise Exception("Matrix has no element" ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: UpperCamelCase : Union[str, Any] = [ self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width ) ] return sum(A_ ) def A_ ( _lowerCAmelCase ) -> 
Matrix: UpperCamelCase : list[list[float]] = [[0] * n for _ in range(_lowerCAmelCase )] return Matrix(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Matrix: random.seed(_lowerCAmelCase ) UpperCamelCase : list[list[float]] = [ [random.randint(_lowerCAmelCase , _lowerCAmelCase ) for _ in range(_lowerCAmelCase )] for _ in range(_lowerCAmelCase ) ] return Matrix(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
52
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}


class MvpConfig(PretrainedConfig):
    """Configuration class for an MVP encoder-decoder model.

    Stores the architecture hyper-parameters (layer counts, hidden sizes,
    dropout rates, prompt settings) used to instantiate the model.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Backward compatibility: old configs used `force_bos_token_to_be_generated`.
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.'
            )
83
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( a): lowerCamelCase__ = 'openai/whisper-base' lowerCamelCase__ = ( 'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the ' 'transcribed text.' ) lowerCamelCase__ = 'transcriber' lowerCamelCase__ = WhisperProcessor lowerCamelCase__ = WhisperForConditionalGeneration lowerCamelCase__ = ['audio'] lowerCamelCase__ = ['text'] def snake_case__ ( self, __a): '''simple docstring''' return self.pre_processor(__a, return_tensors="pt").input_features def snake_case__ ( self, __a): '''simple docstring''' return self.model.generate(inputs=__a) def snake_case__ ( self, __a): '''simple docstring''' return self.pre_processor.batch_decode(__a, skip_special_tokens=__a)[0]
300
"""Convert timm ViT/DeiT checkpoints to the HuggingFace ViT format."""

import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# NOTE(review): in the obfuscated original every function below was defined as
# ``A`` (so earlier definitions were clobbered) while the call sites used the
# real names (create_rename_keys, rename_key, ...).  The real names are
# restored so the call graph resolves.


def create_rename_keys(config, base_model=False):
    """Build (old_key, new_key) pairs mapping timm ViT keys to HF ViT keys."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate q/k/v entries in place.

    NOTE(review): the obfuscated original assigned the slices to throwaway
    locals; the state-dict target keys below are reconstructed from the HF
    ViT layout ("next, add query, keys and values (in that order)").
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop timm's classification head weights (used for ImageNet-21k base models)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy a timm ViT/DeiT checkpoint into HF ViT format and verify outputs."""
    config = ViTConfig()
    base_model = False

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])

    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass  # deit-base keeps the ViTConfig defaults
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass  # base keeps the ViTConfig defaults
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
300
1
"""simple docstring""" class lowerCAmelCase_ : """simple docstring""" def __init__( self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ): """simple docstring""" snake_case = data snake_case = previous snake_case = next_node def __str__( self ): """simple docstring""" return F"""{self.data}""" def snake_case ( self ): """simple docstring""" return self.data def snake_case ( self ): """simple docstring""" return self.next def snake_case ( self ): """simple docstring""" return self.previous class lowerCAmelCase_ : """simple docstring""" def __init__( self , lowerCAmelCase ): """simple docstring""" snake_case = head def __iter__( self ): """simple docstring""" return self def snake_case ( self ): """simple docstring""" if not self.current: raise StopIteration else: snake_case = self.current.get_data() snake_case = self.current.get_next() return value class lowerCAmelCase_ : """simple docstring""" def __init__( self ): """simple docstring""" snake_case = None # First node in list snake_case = None # Last node in list def __str__( self ): """simple docstring""" snake_case = self.head snake_case = [] while current is not None: nodes.append(current.get_data() ) snake_case = current.get_next() return " ".join(str(A_ ) for node in nodes ) def __contains__( self , lowerCAmelCase ): """simple docstring""" snake_case = self.head while current: if current.get_data() == value: return True snake_case = current.get_next() return False def __iter__( self ): """simple docstring""" return LinkedListIterator(self.head ) def snake_case ( self ): """simple docstring""" if self.head: return self.head.get_data() return None def snake_case ( self ): """simple docstring""" if self.tail: return self.tail.get_data() return None def snake_case ( self , lowerCAmelCase ): """simple docstring""" if self.head is None: snake_case = node snake_case = node else: self.insert_before_node(self.head , A_ ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" if self.head is 
None: self.set_head(A_ ) else: self.insert_after_node(self.tail , A_ ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = Node(A_ ) if self.head is None: self.set_head(A_ ) else: self.set_tail(A_ ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" snake_case = node snake_case = node.previous if node.get_previous() is None: snake_case = node_to_insert else: snake_case = node_to_insert snake_case = node_to_insert def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" snake_case = node snake_case = node.next if node.get_next() is None: snake_case = node_to_insert else: snake_case = node_to_insert snake_case = node_to_insert def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" snake_case = 1 snake_case = Node(A_ ) snake_case = self.head while node: if current_position == position: self.insert_before_node(A_ , A_ ) return current_position += 1 snake_case = node.next self.insert_after_node(self.tail , A_ ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = self.head while node: if node.get_data() == item: return node snake_case = node.get_next() raise Exception('Node not found' ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" if (node := self.get_node(A_ )) is not None: if node == self.head: snake_case = self.head.get_next() if node == self.tail: snake_case = self.tail.get_previous() self.remove_node_pointers(A_ ) @staticmethod def snake_case ( lowerCAmelCase ): """simple docstring""" if node.get_next(): snake_case = node.previous if node.get_previous(): snake_case = node.next snake_case = None snake_case = None def snake_case ( self ): """simple docstring""" return self.head is None def lowerCAmelCase__ ( ) -> None: """simple docstring""" pass if __name__ == "__main__": import doctest doctest.testmod()
150
"""Elementary number-theory helpers (primes, gcd/lcm, divisors, fib, ...).

NOTE(review): the obfuscated original defined every function as ``A_`` (so
earlier definitions were clobbered) while the bodies called the real names
(``is_prime``, ``prime_factorization``, ``gcd``, ``get_divisors``, ...).
The real names are restored so the call graph resolves.
"""

from math import sqrt


def is_prime(number):
    """Return True iff ``number`` (an int >= 0) is prime (trial division)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Sieve of Eratosthenes: all primes from 2 up to ``n`` inclusive."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """All primes from 2 up to ``n`` via repeated primality tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Prime factorisation of ``number`` as a list, e.g. 12 -> [2, 2, 3]."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number):
    """True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Return two primes whose sum is the even ``number`` (> 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(numbera, numberb):
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(numbera, int)
        and isinstance(numberb, int)
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest

    # precondition
    assert isinstance(numbera, int) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera


def kg_v(numbera, numberb):
    """Least common multiple built from the two prime factorisations."""
    assert (
        isinstance(numbera, int)
        and isinstance(numberb, int)
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numberb)
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera, numberb)

    count_a = 0
    count_b = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """The n-th prime number, with ``get_prime(0) == 2``."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_a, p_number_b):
    """All primes strictly between the primes ``p_number_a`` < ``p_number_b``."""
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """All divisors of ``n`` (>= 1), including 1 and ``n`` itself."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Reduce numerator/denominator by their gcd; returns a (num, den) tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """The n-th Fibonacci number with ``fib(0) == fib(1) == 1``."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
52
0
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCAmelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = pad_token_id SCREAMING_SNAKE_CASE_ : Optional[int] = max_length SCREAMING_SNAKE_CASE_ : Dict = vocab SCREAMING_SNAKE_CASE_ : Dict = merges SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab() return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_) return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]): '''simple docstring''' return cls(**lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_) SCREAMING_SNAKE_CASE_ 
: Optional[int] = tf.ones_like(lowercase_) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs( lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id) return {"attention_mask": attention_mask, "input_ids": input_ids}
318
"""simple docstring""" UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5 def _A (__a , __a , __a = g ) -> float: """simple docstring""" if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
318
1
import math def __UpperCamelCase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ): if initial_intensity < 0: raise ValueError('''The value of intensity cannot be negative''' ) # handling of negative values of initial intensity if angle < 0 or angle > 3_6_0: raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(lowerCAmelCase__ ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name='malus_law')
216
def __UpperCamelCase ( lowerCAmelCase__ : str ): if n_term == "": return [] __a : list = [] for temp in range(int(lowerCAmelCase__ ) ): series.append(f"1/{temp + 1}" if series else '''1''' ) return series if __name__ == "__main__": lowercase__ =input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
216
1
"""Convert a pickled trax Reformer checkpoint into a PyTorch checkpoint."""

import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging

logging.set_verbosity_info()

# NOTE(review): the obfuscated original defined every function below as
# ``UpperCAmelCase`` (so earlier definitions were clobbered) while the call
# sites used the real names; the real names are restored so the call graph
# resolves.


def set_param(torch_layer, weight, bias=None):
    """Copy a weight tensor (and optional bias) into a torch layer, shape-checked."""
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load an LSH self-attention block (shared query_key, value, output dense)."""
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load a local self-attention block (separate query/key/value, output dense)."""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (attention + chunked feed forward)."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output: an LSH attention block carries 3 arrays, a local
    # attention block carries 4.
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load the full trax weight tree into the torch Reformer model."""
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # NOTE(review): the original isinstance check referenced a mangled name;
    # `tuple` is the reconstructed type for axial position-embedding weights.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a torch Reformer from ``config_file``, load the trax pickle, save."""
    config = ReformerConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
124
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class UpperCAmelCase(TaskTemplate):
    """Task template describing the column layout of a text-classification dataset.

    The template is a frozen dataclass: `align_with_features` returns an updated
    *copy* rather than mutating in place.
    """

    # Canonical task identifier; serialized even when left at its default value.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    # Expected input/output schemas; ClassVar so they are shared, not per-instance fields.
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features: Features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises:
            ValueError: if ``label_column`` is missing from *features* or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names onto the template's canonical names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
124
1
"""simple docstring""" def lowercase ( _snake_case : int , _snake_case : int ) ->int: """simple docstring""" while second != 0: __snake_case : Union[str, Any] = first & second first ^= second __snake_case : Tuple = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : str = int(input("""Enter the first number: """).strip()) SCREAMING_SNAKE_CASE : Dict = int(input("""Enter the second number: """).strip()) print(F'{add(first, second) = }')
102
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")
UpperCAmelCase__ = T  # backward-compat alias for the previously exported name


class a__(Generic[T]):
    """Adjacency-list graph supporting both directed and undirected edges."""

    def __init__(self, directed: bool = True) -> None:
        """Create an empty graph.

        Args:
            directed: when False, every edge is recorded in both endpoint lists.
        """
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def _lowercase(self, source_vertex: T, destination_vertex: T) -> a__[T]:
        """Add an edge, creating any missing vertices, and return ``self`` (fluent)."""
        src_known = source_vertex in self.adj_list
        dst_known = destination_vertex in self.adj_list

        if not self.directed:
            # Undirected: the edge appears in both endpoints' adjacency lists.
            if src_known and dst_known:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            elif src_known:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            elif dst_known:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            else:
                # Neither endpoint exists yet: create both entries.
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:
            # Directed: only the source list gains the destination, but the
            # destination must still exist as a key so it is a known vertex.
            if src_known and dst_known:
                self.adj_list[source_vertex].append(destination_vertex)
            elif src_known:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            elif dst_known:
                self.adj_list[source_vertex] = [destination_vertex]
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
245
0
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class __a ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any] ): UpperCamelCase__ : int =tempfile.mkdtemp() UpperCamelCase__ : int =SamImageProcessor() UpperCamelCase__ : Dict =SamProcessor(lowercase_ ) processor.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self : Optional[int] , **lowercase_ : List[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor def _lowerCAmelCase ( self : Optional[Any] ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self : List[Any] ): UpperCamelCase__ : int =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase__ : Any =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self : Any ): UpperCamelCase__ : Tuple =SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase__ : Any =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) UpperCamelCase__ : Any =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def _lowerCAmelCase ( self : Optional[Any] ): UpperCamelCase__ : Optional[int] =self.get_image_processor() UpperCamelCase__ : int 
=SamProcessor(image_processor=lowercase_ ) UpperCamelCase__ : int =self.prepare_image_inputs() UpperCamelCase__ : List[Any] =image_processor(lowercase_ , return_tensors='''np''' ) UpperCamelCase__ : Optional[Any] =processor(images=lowercase_ , return_tensors='''np''' ) input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def _lowerCAmelCase ( self : List[Any] ): UpperCamelCase__ : Any =self.get_image_processor() UpperCamelCase__ : Union[str, Any] =SamProcessor(image_processor=lowercase_ ) UpperCamelCase__ : Any =[torch.ones((1, 3, 5, 5) )] UpperCamelCase__ : Any =[[1764, 2646]] UpperCamelCase__ : List[str] =[[683, 1024]] UpperCamelCase__ : List[str] =processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) UpperCamelCase__ : List[str] =processor.post_process_masks( lowercase_ , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np UpperCamelCase__ : List[Any] =[np.ones((1, 3, 5, 5) )] UpperCamelCase__ : Any =processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) UpperCamelCase__ : Dict =[[1, 0], [0, 1]] with self.assertRaises(lowercase_ ): UpperCamelCase__ : Tuple =processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) ) @require_vision @require_tf class __a ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any] ): UpperCamelCase__ : Any =tempfile.mkdtemp() UpperCamelCase__ : Any =SamImageProcessor() UpperCamelCase__ : Tuple =SamProcessor(lowercase_ ) 
processor.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self : Tuple , **lowercase_ : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor def _lowerCAmelCase ( self : Dict ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self : str ): UpperCamelCase__ : List[Any] =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase__ : Optional[Any] =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self : Optional[int] ): UpperCamelCase__ : Any =SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase__ : Optional[Any] =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) UpperCamelCase__ : Optional[int] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def _lowerCAmelCase ( self : List[Any] ): UpperCamelCase__ : Dict =self.get_image_processor() UpperCamelCase__ : Tuple =SamProcessor(image_processor=lowercase_ ) UpperCamelCase__ : Any =self.prepare_image_inputs() UpperCamelCase__ : Tuple =image_processor(lowercase_ , return_tensors='''np''' ) UpperCamelCase__ : List[str] =processor(images=lowercase_ , return_tensors='''np''' ) input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def _lowerCAmelCase ( self : str ): UpperCamelCase__ : Dict =self.get_image_processor() UpperCamelCase__ : List[str] =SamProcessor(image_processor=lowercase_ ) 
UpperCamelCase__ : List[str] =[tf.ones((1, 3, 5, 5) )] UpperCamelCase__ : Optional[int] =[[1764, 2646]] UpperCamelCase__ : Dict =[[683, 1024]] UpperCamelCase__ : Tuple =processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ , return_tensors='''tf''' ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) UpperCamelCase__ : int =processor.post_process_masks( lowercase_ , tf.convert_to_tensor(lowercase_ ) , tf.convert_to_tensor(lowercase_ ) , return_tensors='''tf''' , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np UpperCamelCase__ : Union[str, Any] =[np.ones((1, 3, 5, 5) )] UpperCamelCase__ : Union[str, Any] =processor.post_process_masks( lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors='''tf''' ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) UpperCamelCase__ : List[str] =[[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): UpperCamelCase__ : List[Any] =processor.post_process_masks( lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors='''tf''' ) @require_vision @require_torchvision class __a ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : str ): UpperCamelCase__ : Tuple =tempfile.mkdtemp() UpperCamelCase__ : Any =SamImageProcessor() UpperCamelCase__ : List[str] =SamProcessor(lowercase_ ) processor.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self : Any , **lowercase_ : Union[str, Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor def _lowerCAmelCase ( self : List[str] ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self : str ): UpperCamelCase__ : Any =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase__ : Optional[Any] =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def _lowerCAmelCase ( self : List[str] ): UpperCamelCase__ : List[str] 
=self.get_image_processor() UpperCamelCase__ : Dict =SamProcessor(image_processor=lowercase_ ) UpperCamelCase__ : List[str] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) UpperCamelCase__ : int =[tf.convert_to_tensor(lowercase_ )] UpperCamelCase__ : str =[torch.tensor(lowercase_ )] UpperCamelCase__ : Tuple =[[1764, 2646]] UpperCamelCase__ : int =[[683, 1024]] UpperCamelCase__ : Dict =processor.post_process_masks( lowercase_ , lowercase_ , lowercase_ , return_tensors='''tf''' ) UpperCamelCase__ : Union[str, Any] =processor.post_process_masks( lowercase_ , lowercase_ , lowercase_ , return_tensors='''pt''' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def _lowerCAmelCase ( self : Any ): UpperCamelCase__ : Union[str, Any] =self.get_image_processor() UpperCamelCase__ : int =SamProcessor(image_processor=lowercase_ ) UpperCamelCase__ : Optional[Any] =self.prepare_image_inputs() UpperCamelCase__ : Union[str, Any] =image_processor(lowercase_ , return_tensors='''pt''' )['''pixel_values'''].numpy() UpperCamelCase__ : int =processor(images=lowercase_ , return_tensors='''pt''' )['''pixel_values'''].numpy() UpperCamelCase__ : str =image_processor(lowercase_ , return_tensors='''tf''' )['''pixel_values'''].numpy() UpperCamelCase__ : str =processor(images=lowercase_ , return_tensors='''tf''' )['''pixel_values'''].numpy() self.assertTrue(np.allclose(lowercase_ , lowercase_ ) ) self.assertTrue(np.allclose(lowercase_ , lowercase_ ) ) self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
361
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __a ( snake_case__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE_ = 'ChineseCLIPImageProcessor' SCREAMING_SNAKE_CASE_ = ('BertTokenizer', 'BertTokenizerFast') def __init__( self : Tuple , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] ): UpperCamelCase__ : List[str] =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowercase_ , ) UpperCamelCase__ : List[str] =kwargs.pop('''feature_extractor''' ) UpperCamelCase__ : List[Any] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowercase_ , lowercase_ ) UpperCamelCase__ : Union[str, Any] =self.image_processor def __call__( self : Optional[int] , lowercase_ : int=None , lowercase_ : Optional[int]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] ): if text is None and images is None: raise ValueError('''You have to specify either text or images. 
Both cannot be none.''' ) if text is not None: UpperCamelCase__ : Optional[int] =self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ ) if images is not None: UpperCamelCase__ : str =self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ ) if text is not None and images is not None: UpperCamelCase__ : Optional[int] =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ ) def _lowerCAmelCase ( self : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ): return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def _lowerCAmelCase ( self : str , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ): return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property def _lowerCAmelCase ( self : List[Any] ): UpperCamelCase__ : List[str] =self.tokenizer.model_input_names UpperCamelCase__ : List[str] =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _lowerCAmelCase ( self : Any ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , ) return self.image_processor_class
157
0
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """A tree node covering the inclusive index range [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2  # split point used by update/query routing
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree aggregating values with an arbitrary associative function.

    Supports point updates and inclusive range queries in O(log n).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        self.root = None  # stays None for an empty collection
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set ``collection[i]`` to ``val`` and refresh aggregates on the path."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the fn-aggregate over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf: one element; internal node: combine the two halves.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recombine children after the leaf changed.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child
                return self._query_range(node.left, i, j)
            # range straddles both children: combine the two partial answers
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in the right child
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield the nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


# Backward-compat alias for the previously exported (shadowed) class name.
_snake_case = SegmentTree


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
94
"""Tokenization tests for BartphoTokenizer against a small SentencePiece fixture.

NOTE(review): identifier obfuscation has collapsed distinct names here — the
mixin base `_UpperCAmelCase` is undefined, the three class attributes all bind
to `a_`, all four methods share one name, and locals collapsed to
`lowerCAmelCase__` leave `vocab_tokens`, `tokenizer`, `tokens` etc. undefined
as written. Comments describe the intent; identifiers need restoring.
"""
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


# Path to the shared SentencePiece BPE model fixture.
a_ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')


class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
    """Tokenizer test-suite subclass wiring BartphoTokenizer into the common mixin."""

    a_ = BartphoTokenizer
    a_ = False
    a_ = True

    def UpperCAmelCase ( self )-> Dict:
        """setUp: write a tiny monolingual vocab file and save a tokenizer built from it."""
        super().setUp()
        lowerCAmelCase__ = ["▁This", "▁is", "▁a", "▁t", "est"]
        lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
        lowerCAmelCase__ = {"unk_token": "<unk>"}
        lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F"{token} {vocab_tokens[token]}\n" )
        lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
        """Reload the saved tokenizer, applying the special-token overrides."""
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
        """Provide an input/expected-output pair for the common round-trip tests."""
        lowerCAmelCase__ = "This is a là test"
        lowerCAmelCase__ = "This is a<unk><unk> test"
        return input_text, output_text

    def UpperCAmelCase ( self )-> Optional[Any]:
        """Tokenization must split into the fixture pieces; OOV pieces map to <unk> (id 3)."""
        lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
        lowerCAmelCase__ = "This is a là test"
        lowerCAmelCase__ = "▁This ▁is ▁a ▁l à ▁t est".split()
        lowerCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ = tokens + [tokenizer.unk_token]
        lowerCAmelCase__ = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
340
0
"""simple docstring""" from __future__ import annotations import requests lowerCamelCase = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 1 , lowerCAmelCase__ = "new" , lowerCAmelCase__ = None ): UpperCAmelCase_ = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowerCAmelCase__ ) - valid_terms ) ): UpperCAmelCase_ = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(lowerCAmelCase__ ) UpperCAmelCase_ = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError UpperCAmelCase_ = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowerCAmelCase__ )} UpperCAmelCase_ = {} for id_ in range(lowerCAmelCase__ ): UpperCAmelCase_ = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
241
"""simple docstring""" import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline lowerCamelCase = datasets.utils.logging.get_logger(__name__) @dataclass class lowercase__ ( datasets.BuilderConfig ): '''simple docstring''' UpperCamelCase = None UpperCamelCase = "utf-8" UpperCamelCase = None UpperCamelCase = None UpperCamelCase = True # deprecated UpperCamelCase = None # deprecated UpperCamelCase = 10 << 20 # 10MB UpperCamelCase = None class lowercase__ ( datasets.ArrowBasedBuilder ): '''simple docstring''' UpperCamelCase = JsonConfig def lowercase__ ( self : List[Any] ) -> Dict: '''simple docstring''' if self.config.block_size is not None: logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" ) UpperCAmelCase_ = self.config.block_size if self.config.use_threads is not True: logger.warning( "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." 
) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" ) return datasets.DatasetInfo(features=self.config.features ) def lowercase__ ( self : Tuple , _UpperCAmelCase : Optional[int] ) -> Optional[int]: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_UpperCAmelCase , (str, list, tuple) ): UpperCAmelCase_ = data_files if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase_ = [files] UpperCAmelCase_ = [dl_manager.iter_files(_UpperCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] UpperCAmelCase_ = [] for split_name, files in data_files.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase_ = [files] UpperCAmelCase_ = [dl_manager.iter_files(_UpperCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={"files": files} ) ) return splits def lowercase__ ( self : str , _UpperCAmelCase : pa.Table ) -> pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): UpperCAmelCase_ = self.config.features.arrow_schema.field(_UpperCAmelCase ).type UpperCAmelCase_ = pa_table.append_column(_UpperCAmelCase , pa.array([None] * len(_UpperCAmelCase ) , type=_UpperCAmelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example UpperCAmelCase_ = table_cast(_UpperCAmelCase , self.config.features.arrow_schema ) return pa_table def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> str: '''simple docstring''' for file_idx, file in 
enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: UpperCAmelCase_ = json.load(_UpperCAmelCase ) # We keep only the field we are interested in UpperCAmelCase_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_UpperCAmelCase , (list, tuple) ): UpperCAmelCase_ = set().union(*[row.keys() for row in dataset] ) UpperCAmelCase_ = {col: [row.get(_UpperCAmelCase ) for row in dataset] for col in keys} else: UpperCAmelCase_ = dataset UpperCAmelCase_ = pa.Table.from_pydict(_UpperCAmelCase ) yield file_idx, self._cast_table(_UpperCAmelCase ) # If the file has one json object per line else: with open(_UpperCAmelCase , "rb" ) as f: UpperCAmelCase_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small UpperCAmelCase_ = max(self.config.chunksize // 32 , 16 << 10 ) UpperCAmelCase_ = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: UpperCAmelCase_ = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_UpperCAmelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": UpperCAmelCase_ = batch.decode(self.config.encoding , errors=_UpperCAmelCase ).encode("utf-8" ) try: while True: try: UpperCAmelCase_ = paj.read_json( io.BytesIO(_UpperCAmelCase ) , read_options=paj.ReadOptions(block_size=_UpperCAmelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_UpperCAmelCase , pa.ArrowInvalid ) and "straddling" not in str(_UpperCAmelCase ) or block_size > 
len(_UpperCAmelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(_UpperCAmelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: UpperCAmelCase_ = json.load(_UpperCAmelCase ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_UpperCAmelCase , _UpperCAmelCase ): # list is the only sequence type supported in JSON try: UpperCAmelCase_ = set().union(*[row.keys() for row in dataset] ) UpperCAmelCase_ = {col: [row.get(_UpperCAmelCase ) for row in dataset] for col in keys} UpperCAmelCase_ = pa.Table.from_pydict(_UpperCAmelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(_UpperCAmelCase ) break else: logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. 
""" ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_UpperCAmelCase ) batch_idx += 1
241
1
"""Tests for ``ClapProcessor`` (tokenizer + feature-extractor pairing).

NOTE(review): in the incoming revision every method was named ``lowercase__`` —
so Python kept only the last definition and pytest discovered nothing — and the
``setUp`` assignments had lost their ``self.`` targets, leaving ``self.checkpoint``
and ``self.tmpdirname`` undefined.  Names are restored to the standard
``unittest`` conventions the method bodies already rely on.
"""
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        # Checkpoint shared by tokenizer and feature extractor; temp dir for save/load round-trips.
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        # Saving then reloading a processor must preserve both sub-components.
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        # from_pretrained loads the fast tokenizer by default — TODO confirm against upstream test
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        # Extra kwargs passed to from_pretrained must flow into both sub-components.
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        # The processor's audio path must match the bare feature extractor.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        # The processor's text path must match the bare tokenizer.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        # batch_decode must delegate to the tokenizer.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        # First two processor input names are the tokenizer's (input_ids, attention_mask).
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
260
"""Perplexity metric for the ``datasets`` library, backed by a causal LM.

NOTE(review): the incoming revision had three bugs fixed here:
1. the device-selection ``else`` was attached to ``if device == "gpu"`` instead of
   ``if device is not None`` — so an explicit ``device="cpu"`` was silently
   overridden with CUDA when available;
2. non-existent torch names (``torch.expa``, ``torch.intaa``) restored to
   ``torch.exp`` / ``torch.int64``;
3. mangled assignment targets restored so reads (``existing_special_tokens``,
   ``shift_labels``, ...) resolve.
"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = "\\n\n"

_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"

_KWARGS_DESCRIPTION = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """Exponentiated average negative log-likelihood under a causal LM."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Compute per-text and mean perplexity of ``input_texts`` under ``model_id``.

        Raises AssertionError on an invalid device, a pad-less tokenizer with
        batch_size > 1, a BOS-less model with add_start_token=True, or too-short inputs.
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            # No explicit request: prefer CUDA when available.
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # Prepend BOS to every sequence and extend the mask accordingly.
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # Standard causal-LM shift: predict token t+1 from logits at t.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # Masked mean NLL per sequence, then exponentiate.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
260
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : int = logging.get_logger(__name__) _UpperCAmelCase : Dict = { "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json", } class lowerCAmelCase ( _SCREAMING_SNAKE_CASE ): UpperCAmelCase__ = """timesformer""" def __init__( self : int , UpperCAmelCase : List[str]=224 , UpperCAmelCase : Optional[int]=16 , UpperCAmelCase : Any=3 , UpperCAmelCase : Optional[int]=8 , UpperCAmelCase : str=768 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : str=3072 , UpperCAmelCase : Tuple="gelu" , UpperCAmelCase : int=0.0 , UpperCAmelCase : int=0.0 , UpperCAmelCase : List[str]=0.0_2 , UpperCAmelCase : Optional[int]=1e-6 , UpperCAmelCase : str=True , UpperCAmelCase : str="divided_space_time" , UpperCAmelCase : Dict=0 , **UpperCAmelCase : Optional[Any] , ) -> List[str]: super().__init__(**UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = image_size lowerCamelCase__ : int = patch_size lowerCamelCase__ : Optional[Any] = num_channels lowerCamelCase__ : Optional[int] = num_frames lowerCamelCase__ : str = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : Union[str, Any] = intermediate_size lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : List[str] = hidden_dropout_prob lowerCamelCase__ : int = attention_probs_dropout_prob lowerCamelCase__ : List[Any] = initializer_range lowerCamelCase__ : int = layer_norm_eps lowerCamelCase__ : Any = qkv_bias lowerCamelCase__ : Any = attention_type lowerCamelCase__ : Optional[Any] = drop_path_rate
365
"""Multi-level feedback queue (MLFQ) CPU scheduler.

Upper queue levels run round-robin with a per-level time slice; the last level
runs first-come-first-served.

NOTE(review): the incoming revision named both classes ``lowerCAmelCase`` (the
second shadowed the first), named every MLFQ method ``A_`` (only the last
survived), and the demo referenced the undefined names ``Process``/``MLFQ`` and
the descriptive method names — all restored here.
"""
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Names of finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Per-process total ready-queue waiting time."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Per-process arrival-to-completion time."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Per-process completion (last stop) time."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Remaining burst time of each process in ``queue``."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Add the time since the process last stopped to its waiting time."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """Run all remaining processes to completion in FIFO order."""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        """One round-robin cycle; unfinished processes rejoin ``ready_queue``."""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        """Run the whole schedule and return the finish queue."""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
45
0
from __future__ import annotations lowerCamelCase__ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class A__ : def __init__( self : List[str] , a : Dict , a : int ): '''simple docstring''' lowerCAmelCase__ : List[str] = graph # mapping node to its parent in resulting breadth first tree lowerCAmelCase__ : List[str] = {} lowerCAmelCase__ : int = source_vertex def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Any = {self.source_vertex} lowerCAmelCase__ : int = None lowerCAmelCase__ : Dict = [self.source_vertex] # first in first out queue while queue: lowerCAmelCase__ : int = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(a ) lowerCAmelCase__ : Any = vertex queue.append(a ) def _lowerCamelCase ( self : str , a : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex lowerCAmelCase__ : Optional[int] = self.parent.get(a ) if target_vertex_parent is None: lowerCAmelCase__ : Any = ( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(a ) return self.shortest_path(a ) + f'''->{target_vertex}''' if __name__ == "__main__": lowerCamelCase__ = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
212
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


# NOTE(review): the base class was the undefined name `snake_case__` (restored to
# the imported `ProcessorMixin`), and the `decoder_attention_mask` /
# `decoder_input_ids` rebind targets were lost so the popped values were
# discarded — restored below.
class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into one processor.

    Routes `images` to the image processor and `text` to the tokenizer, merging
    the results into a single encoding.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Pix2Struct does not use token_type_ids; disable them on the tokenizer.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images and/or text for the model.

        Returns a `BatchEncoding` with `pixel_values`-style features and, when
        `text` is given in non-VQA mode, `decoder_input_ids`/`decoder_attention_mask`.
        Raises ValueError when neither images nor text is provided.
        """
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox; in VQA mode the question is rendered into the image
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            # Rename tokenizer outputs to their decoder-side names.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop('attention_mask')
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop('input_ids')
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of tokenizer and image-processor input names.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
39
0
"""Binarize a raw text dump into pickled token-id arrays for distillation.

NOTE(review): the incoming revision defined the entry point under a mangled
name while the guard called the undefined ``main()``, used the non-existent
names ``GPTaTokenizer`` / ``np.uintaa`` / ``np.intaa``, and read locals
(``args``, ``bos``, ``rslt_``, ...) that were never bound — all restored here.
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    """Tokenize each line of ``--file_path`` and pickle the shuffled id arrays."""
    parser = argparse.ArgumentParser(
        description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."""
    )
    parser.add_argument("""--file_path""", type=str, default="""data/dump.txt""", help="""The path to the data.""")
    parser.add_argument("""--tokenizer_type""", type=str, default="""bert""", choices=["""bert""", """roberta""", """gpt2"""])
    parser.add_argument("""--tokenizer_name""", type=str, default="""bert-base-uncased""", help="""The tokenizer to use.""")
    parser.add_argument("""--dump_file""", type=str, default="""data/dump""", help="""The dump file prefix.""")
    args = parser.parse_args()

    logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["""cls_token"""]  # `[CLS]`
        sep = tokenizer.special_tokens_map["""sep_token"""]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["""cls_token"""]  # `<s>`
        sep = tokenizer.special_tokens_map["""sep_token"""]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["""bos_token"""]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["""eos_token"""]  # `<|endoftext|>`

    logger.info(f"""Loading text from {args.file_path}""")
    with open(args.file_path, """r""", encoding="""utf8""") as fp:
        data = fp.readlines()

    logger.info("""Start encoding""")
    logger.info(f"""{len(data)} examples to process.""")

    rslt = []
    # `iter_` (not `iter`) to avoid shadowing the builtin.
    iter_ = 0
    interval = 10000
    start = time.time()
    for text in data:
        # Wrap each line in the model's BOS/SEP special tokens ourselves,
        # so encode() is called with add_special_tokens=False.
        text = f"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f"""{iter_} examples processed. - {(end-start):.2f}s/{interval}expl""")
            start = time.time()
    logger.info("""Finished binarization""")
    logger.info(f"""{len(data)} examples processed.""")

    dp_file = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when the vocab fits in 16 bits; saves half the space.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"""Dump to {dp_file}""")
    with open(dp_file, """wb""") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
204
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> list of public names; consumed by `_LazyModule` below so
# heavy backends (torch/tf/vision) are only imported on first attribute access.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision extras missing: skip registering the image processor.
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; the original assigned the proxy to a
    # dead local variable, which left the module non-lazy and `_import_structure` unbound.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
204
1
"""Training utilities: RNG seeding and an Exponential Moving Average of model parameters."""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def set_seed(seed: int):
    """Seed python, numpy and torch (CPU + all CUDA devices) RNGs for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """
    Exponential Moving Average of model parameters.

    Keeps a `shadow_params` copy of the tracked parameters and updates it on every
    `step()` with a (optionally warmed-up) decay schedule.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        """
        Args:
            parameters: parameters to track (an iterable, not a Module).
            decay: maximum EMA decay.
            min_decay: lower bound on the decay (relevant with warmup).
            update_after_step: optimizer steps to skip before EMA starts.
            use_ema_warmup: ramp decay as ``1 - (1 + step / inv_gamma) ** -power``.
            inv_gamma, power: warmup schedule hyper-parameters.
            model_cls, model_config: enable `save_pretrained`/`from_pretrained`.
            kwargs: deprecated `max_value`/`min_value`/`device` aliases.
        """
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        # Detached copies tracked by the EMA; updated in-place inside `step()`.
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        """Rebuild an EMAModel from a checkpoint saved via `save_pretrained`."""
        # Unused config kwargs carry the EMA state (decay, step, ...).
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Materialize the EMA weights into a fresh model and save it to `path`."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        # The tensors themselves are stored in the model weights, not the config.
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average at this step."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update `shadow_params` towards `parameters` with the current decay."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # Under ZeRO-3 each rank must gather the full parameter first.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    # shadow = decay * shadow + (1 - decay) * param, done in-place.
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the EMA weights into `parameters` (overwrites their data)."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the shadow parameters to `device`/`dtype` (dtype only for float tensors)."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the full EMA state (hyper-parameters + shadow tensors)."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily stash CPU copies of `parameters` so `restore` can undo `copy_to`."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore parameters previously saved with `store`, then drop the stash."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load and validate an EMA state produced by `state_dict()`."""
        # Deepcopy so callers' dicts are never mutated.
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
145
'''simple docstring''' def __UpperCAmelCase ( a_: str, a_: str ): if len(a_ ) != len(a_ ): raise ValueError("String lengths must match!" ) _UpperCAmelCase : Dict = 0 for chara, chara in zip(a_, a_ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
145
1
"""Flax safety checker used to flag NSFW outputs of Stable Diffusion."""
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Cosine similarity matrix between the rows of `emb_1` and `emb_2`.

    Each embedding matrix is L2-normalized row-wise (clipped at `eps` to avoid
    division by zero), then the pairwise dot products are returned.
    """
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    """Inner linen module: CLIP vision encoder + learned concept embeddings."""

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        # 17 generic "bad concept" embeddings and 3 "special care" embeddings,
        # each with a per-concept decision threshold (the *_weights params).
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    """`FlaxPreTrainedModel` wrapper around the safety-checker module."""

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            # NHWC dummy shape for parameter initialization.
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        # Incoming pixel values are NCHW; the vision module expects NHWC.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
246
"""`datasets` metric wrapping :func:`scipy.stats.spearmanr`."""
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {'spearmanr': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results['spearmanr'])\n        -0.7\n        >>> print(round(results['spearmanr_pvalue'], 2))\n        0.19\n"

_CITATION = R"\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n             Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n             Kern, Robert and Larson, Eric and Carey, C J and\n             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n             Harris, Charles R. and Archibald, Anne M. and\n             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n             Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation coefficient metric."""

    def _info(self):
        # Metric metadata and the expected input feature schema.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # scipy returns a (correlation, pvalue) result object indexable by position.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
246
1
"""simple docstring""" from collections.abc import Sequence def _lowerCamelCase( a = None ): if nums is None or not nums: raise ValueError("Input sequence should not be empty" ) __a = nums[0] for i in range(1 , len(a ) ): __a = nums[i] __a = max(a , ans + num , a ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user SCREAMING_SNAKE_CASE__:Optional[int] = int(input("""Enter number of elements : """).strip()) SCREAMING_SNAKE_CASE__:str = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n] print(max_subsequence_sum(array))
261
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
261
1
"""Convert original DPT-hybrid checkpoints to the Hugging Face `transformers` format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    """Build the `DPTConfig` (and the expected output shape) for a checkpoint URL."""
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # NOTE(review): `"nyu" or "midas" in checkpoint_url` is always True ("nyu" is a
    # truthy literal), so this hybrid branch runs for every URL. Kept as-is to
    # preserve the behavior existing conversions rely on — confirm intent.
    if "nyu" or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    """Drop original head keys that have no counterpart in the HF model."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    """Map one original checkpoint key to its HF `transformers` name.

    The replacements are order-sensitive: later generic rules (e.g. "pretrained"
    -> "dpt") rely on earlier specific rules having already fired.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    """Split each fused `qkv` projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    """Download the standard COCO cats test image used to sanity-check outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Convert an original DPT checkpoint, optionally save/push/visualize the result."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
59
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


UpperCamelCase_ = logging.get_logger(__name__)


class _snake_case(BaseImageProcessor):  # FIX: base was the undefined name `__snake_case`
    """Image processor that resizes, center-crops, rescales and normalizes images.

    FIX(review): in the mangled original every method was named ``A__`` (so
    later definitions clobbered earlier ones) while ``preprocess`` called
    ``self.resize`` / ``self.center_crop`` / ``self.rescale`` /
    ``self.normalize`` — which therefore did not exist — and all ``self.*``
    assignment targets were lost.  Names are restored from their call/read
    sites.  NOTE(review): the order of the three boolean ``__init__``
    parameters and the ``default_to_square`` values were also mangled and are
    reconstructed by convention — confirm against the upstream file.
    """

    model_input_names = ["pixel_values"]  # was the clobbered class attribute `A__`

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        # assumes default_to_square=True for a {"height","width"} crop — TODO confirm
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to `size`: {"shortest_edge": s} keeps aspect ratio, {"height","width"} is exact."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size` ({"height", "width"} required)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured transform pipeline and return a BatchFeature."""
        # per-call arguments fall back to the instance defaults
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
59
1
"""Training determinism test: a DDPM and a DDIM scheduler must produce identical
training steps when seeded identically."""
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


# NOTE(review): the original name of this module-level flag was mangled;
# it is never read in the visible code, so the value is kept as-is.
_UpperCamelCase : Optional[int] = False


class UpperCAmelCase_(unittest.TestCase):
    """Trains the same tiny UNet once per scheduler and compares the last step.

    FIX(review): in the mangled original both methods were named
    `_UpperCAmelCase` (clobbering the helper that `self.get_model_optimizer`
    below requires), the test method lacked a `test_` prefix so unittest never
    ran it, loop-local assignment targets were lost, and the final asserts
    compared the undefined name `a` with itself.  Names are restored from the
    call sites; `clip_sample=True` is reconstructed — confirm upstream.
    """

    def get_model_optimizer(self, resolution=32):
        """Return a freshly-seeded tiny UNet and an SGD optimizer for it."""
        set_seed(0)  # deterministic weights so both runs start identically
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        device = "cpu"
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # last-iteration tensors from the two runs must agree
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
77
def __lowerCamelCase ( snake_case__ ) -> list: """simple docstring""" def merge(snake_case__ ,snake_case__ ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(snake_case__ ) <= 1: return collection _SCREAMING_SNAKE_CASE = len(snake_case__ ) // 2 return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip() UpperCamelCase = [int(item) for item in user_input.split(''',''')] print(*merge_sort(unsorted), sep=''',''')
306
0
def UpperCAmelCase ( a_ , a_ ) -> float: """simple docstring""" if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(a_ ) * abs(a_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
124
import argparse
import os
import re

import packaging.version


# FIX(review): in the mangled original all four constants were assigned to the
# single name `SCREAMING_SNAKE_CASE` and all seven helpers were named
# `UpperCAmelCase`, so later definitions clobbered earlier ones and the call
# sites (REPLACE_PATTERNS, update_version_in_file, get_version, ...) referenced
# undefined names.  All names are restored from their read sites.
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version inside `fname` using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update `check_min_version` calls in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Propagate `version` to every registered file (and examples unless a patch release)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point model-doc links in the README at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package `__init__` and parse it."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Ask for the release version and stamp it everywhere."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Bump to the next dev version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
124
1
from collections import deque


class Process:
    """A process plus the bookkeeping fields the MLFQ scheduler updates.

    FIX(review): both classes in the mangled original were named `__lowercase`
    (the second clobbered the first), the `__init__` parameter lists repeated a
    single name (a SyntaxError), and every `self.*` assignment target was lost.
    All names are restored from their read sites and the `__main__` block.
    """

    def __init__(self, process_name, arrival_time, burst_time) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    """Multi-Level Feedback Queue: round robin on the upper queues, FCFS on the last."""

    def __init__(self, number_of_queues, time_slices, queue, current_time) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Names of finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue) -> list[int]:
        """Total ready-queue waiting time of each process in `queue`."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue) -> list[int]:
        """Turnaround time (arrival -> completion) of each process in `queue`."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue) -> list[int]:
        """Completion (stop) time of each process in `queue`."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue) -> list[int]:
        # NOTE(review): never called in the visible code; restored name follows
        # the sibling `calculate_*` helpers — confirm upstream.
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process) -> int:
        """Add the time spent in the ready queue since the process last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue) -> deque[Process]:
        """Run every remaining process to completion, in queue order."""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice) -> tuple[deque[Process], deque[Process]]:
        """Give each queued process one `time_slice`; unfinished ones re-queue."""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        """Run the full schedule and return the finish queue."""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    # FIX(review): the mangled original bound every process to the same
    # obfuscated name and then referenced the undefined `Pa`.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
124
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    """Builds tiny configs/inputs and checks every Nystromformer head shape.

    FIX(review): in the mangled original both classes were named `__lowercase`,
    every method was named `UpperCAmelCase`, `__init__` repeated a single
    parameter name (a SyntaxError), and all `self.*`/local assignment targets
    were lost — so nearly every reference was undefined.  Names are restored
    from their call/read sites (e.g. `self.model_tester.prepare_config_and_inputs()`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random token ids/masks/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): `is_decoder` value was mangled; False is the
        # conventional encoder-test setting — confirm upstream.
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # replicate each sequence once per choice: (batch, choices, seq)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for Nystromformer.

    FIX(review): methods below must be named `setUp` / `test_*` for unittest to
    run them; the mangled `UpperCAmelCase` names made every test invisible.
    """

    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the two mangled `_snake_case = False` attributes are
    # restored as this pair by convention — confirm upstream.
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # NOTE(review): the assignment target was mangled; setting the
            # config's position_embedding_type is the conventional pattern.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        # position 2 is the [MASK] token in the encoded sentence
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
124
1
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel

from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
    BaseOutput,
    is_accelerate_available,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .renderer import ShapERenderer


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from PIL import Image
        >>> import torch
        >>> from diffusers import DiffusionPipeline
        >>> from diffusers.utils import export_to_gif, load_image

        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        >>> repo = "openai/shap-e-img2img"
        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
        >>> pipe = pipe.to(device)

        >>> guidance_scale = 3.0
        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
        >>> image = load_image(image_url).convert("RGB")

        >>> images = pipe(
        ...     image,
        ...     guidance_scale=guidance_scale,
        ...     num_inference_steps=64,
        ...     frame_size=256,
        ... ).images

        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
        ```
"""


@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output of the Shap-E pipelines.

    Attributes:
        images: rendered frames, as a list of PIL images or a numpy array.
    """

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline that turns an input image into a 3D latent and renders view frames.

    Components:
        prior: diffusion prior mapping CLIP image embeddings to Shap-E latents.
        image_encoder: frozen CLIP vision tower producing the conditioning embedding.
        image_processor: CLIP preprocessor for raw PIL images.
        scheduler: Heun scheduler driving the prior's denoising loop.
        renderer: NeRF-style renderer decoding latents into frames.
    """

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw fresh latents (or validate user-supplied ones) and scale by the scheduler's init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload the image encoder and prior to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        """Device to run on; honours accelerate hooks when modules were offloaded."""
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        """Encode the conditioning image(s) into CLIP patch embeddings.

        When classifier-free guidance is on, prepend zero embeddings so the prior
        can be run on the unconditional and conditional batch in one pass.
        """
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            # Stack pre-processed tensors; concatenate if they already carry a batch dim.
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # drop CLS token: batch_size, 256, dim
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate 3D renders conditioned on `image`.

        Returns a `ShapEPipelineOutput` (or tuple when `return_dict=False`) whose
        `images` are rendered frames, raw latents when `output_type == "latent"`.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            # BUGFIX: `do_classifier_free_guidance` is a bool; the original checked
            # `is not None` which is always true and would chunk an undoubled batch.
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            # (stray debug print() removed here)
            frames = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(frames)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(frame) for frame in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
286
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml


logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: wrap a benchmark callable for eager or graph (optionally XLA) execution."""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """Build a (batch_size, sequence_length) tensor of random token ids in [0, vocab_size)."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    """TensorFlow backend for the benchmark framework (speed and memory measurements)."""

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg forward-pass callable."""
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg forward+backward callable."""
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Time `func`; return the minimum per-call runtime in seconds."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        """Measure peak memory of `func`; returns (memory, optional line-by-line summary)."""
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
286
1
import inspect
import unittest

from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class RegNetModelTester:
    """Builds tiny RegNet configs/inputs and runs shape checks for the shared mixin."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one stage per entry in hidden_sizes
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # common config properties do not apply to RegNet
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by vision integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
169
"""Mahalanobis distance metric for the HuggingFace `datasets` library."""
import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.

Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    """Squared Mahalanobis distance of each row of `X` w.r.t. a reference distribution."""

    def _info(self):
        # Only `X` is a declared feature; `reference_distribution` is passed
        # directly as a keyword argument to `compute`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return ``{"mahalanobis": dists}`` for each datapoint in ``X``.

        Raises:
            ValueError: if either input is not 2D, or the reference
                distribution has fewer than two samples.
        """
        # Convert to numpy arrays (inputs may arrive as nested lists).
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D.
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Center X on the per-feature mean of the reference distribution
        # (axis=0 gives the mean vector, per the definition of the distance).
        X_minus_mu = X - np.mean(reference_distribution, axis=0)
        covariance = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(covariance)
        except np.linalg.LinAlgError:
            # Singular covariance (e.g. perfectly correlated features):
            # fall back to the Moore-Penrose pseudo-inverse.
            inv_covmat = np.linalg.pinv(covariance)
        left_term = np.dot(X_minus_mu, inv_covmat)
        # Diagonal of (X-mu) S^-1 (X-mu)^T = squared distance per datapoint.
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
157
0
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class _snake_case ( lowercase_ , unittest.TestCase ): lowerCAmelCase_ : int = DebertaVaTokenizer lowerCAmelCase_ : Optional[int] = DebertaVaTokenizerFast lowerCAmelCase_ : Any = True lowerCAmelCase_ : Optional[int] = True def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case_ = DebertaVaTokenizer(a__ , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self , a__ ) -> Any: '''simple docstring''' snake_case_ = "this is a test" snake_case_ = "this is a test" return input_text, output_text def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ = "<pad>" snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(a__ ) , 30_001 ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ = " \tHeLLo!how \n Are yoU? 
" snake_case_ = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on snake_case_ = DebertaVaTokenizer(a__ , do_lower_case=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) snake_case_ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' pass def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ = "I was born in 92000, and this is falsé." snake_case_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on snake_case_ = DebertaVaTokenizer(a__ , split_by_punct=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) snake_case_ = DebertaVaTokenizerFast(a__ , split_by_punct=a__ ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ = "I was born in 92000, and this is falsé." 
snake_case_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on snake_case_ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) snake_case_ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ = "I was born in 92000, and this is falsé." snake_case_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on snake_case_ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) snake_case_ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ = "I was born in 92000, and this is falsé." 
snake_case_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on snake_case_ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) snake_case_ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ = " \tHeLLo!how \n Are yoU? " snake_case_ = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on snake_case_ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) snake_case_ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = "I was born in 92000, and this is falsé." 
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ ) snake_case_ = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(a__ ) snake_case_ = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ = "This is a test" snake_case_ = [13, 1, 4_398, 25, 21, 1_289] snake_case_ = ["▁", "T", "his", "▁is", "▁a", "▁test"] snake_case_ = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] snake_case_ = DebertaVaTokenizer(a__ , keep_accents=a__ ) snake_case_ = DebertaVaTokenizerFast(a__ , keep_accents=a__ ) snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual(a__ , a__ ) # fmt: off snake_case_ = "I was born in 92000, and this is falsé." 
snake_case_ = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] snake_case_ = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] snake_case_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) snake_case_ = rust_tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual(a__ , a__ ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ = DebertaVaTokenizer(a__ ) snake_case_ = tokenizer.encode("sequence builders" ) snake_case_ = tokenizer.encode("multi-sequence build" ) snake_case_ = tokenizer.build_inputs_with_special_tokens(a__ ) snake_case_ = tokenizer.build_inputs_with_special_tokens(a__ , a__ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a__ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a__ , ) @slow def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ = {"input_ids": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 
13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a__ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
92
"""Dummy placeholder objects that raise a helpful error when Flax is absent.

Each class below stands in for a real Flax-backed class; instantiating it (or
calling either classmethod hook) delegates to ``requires_backends``, which
raises an ImportError telling the user to install ``flax``.

NOTE(review): all thirteen classes share the name ``_snake_case`` and the
metaclass name ``lowercase_`` is undefined in this file — these look like
corrupted identifiers (upstream this pattern uses distinct class names with
``metaclass=DummyObject``).  Confirm against the original file; code is left
byte-identical here.
"""
from ..utils import DummyObject, requires_backends


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])


class _snake_case(metaclass=lowercase_):
    # Backend(s) whose absence these placeholders report.
    lowerCAmelCase_: list[str] = ["flax"]

    def __init__(self, *a__, **a__) -> None:
        """Refuse construction: ``flax`` is not installed."""
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCAmelCase__(cls, *a__, **a__) -> None:
        """Classmethod hook — raises because ``flax`` is missing."""
        requires_backends(cls, ["flax"])
92
1
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class __lowerCamelCase ( A__ , unittest.TestCase ): '''simple docstring''' a_ : Tuple = TextToVideoSDPipeline a_ : Optional[int] = TEXT_TO_IMAGE_PARAMS a_ : str = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. a_ : str = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def lowerCamelCase ( self : Optional[Any] ): torch.manual_seed(0 ) lowerCAmelCase_ : Any = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) lowerCAmelCase_ : str = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , ) torch.manual_seed(0 ) lowerCAmelCase_ : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) lowerCAmelCase_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) lowerCAmelCase_ : Dict = CLIPTextModel(a_ ) lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCAmelCase_ : Union[str, Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def lowerCamelCase ( self : Any , a_ : List[Any] , a_ : Dict=0 ): if str(a_ ).startswith("mps" ): lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(a_ ) else: lowerCAmelCase_ : str = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCAmelCase_ : Union[str, Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def lowerCamelCase ( self : Any ): lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ : str = self.get_dummy_components() lowerCAmelCase_ : List[str] = TextToVideoSDPipeline(**a_ ) lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Optional[Any] = self.get_dummy_inputs(a_ ) lowerCAmelCase_ : Dict = "np" lowerCAmelCase_ : Union[str, Any] = sd_pipe(**a_ ).frames lowerCAmelCase_ : Optional[int] = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) lowerCAmelCase_ : List[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase ( self : str ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=a_ , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase ( self : Tuple ): 
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_ , expected_max_diff=1e-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase ( self : Tuple ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase ( self : Tuple ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def lowerCamelCase ( self : Tuple ): pass def lowerCamelCase ( self : Union[str, Any] ): return super().test_progress_bar() @slow @skip_mps class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase ( self : Dict ): lowerCAmelCase_ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) lowerCAmelCase_ : List[str] = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) lowerCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowerCAmelCase_ : Any = pipe.to("cuda" ) lowerCAmelCase_ : List[str] = "Spiderman is surfing" lowerCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCAmelCase_ : int = pipe(a_ , generator=a_ , num_inference_steps=25 , output_type="pt" ).frames lowerCAmelCase_ : Optional[int] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def lowerCamelCase ( self : List[str] ): lowerCAmelCase_ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) lowerCAmelCase_ : Optional[int] = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) lowerCAmelCase_ : Union[str, Any] = pipe.to("cuda" ) lowerCAmelCase_ : Optional[int] = "Spiderman is surfing" lowerCAmelCase_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = pipe(a_ , generator=a_ , 
num_inference_steps=2 , output_type="pt" ).frames lowerCAmelCase_ : Union[str, Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
241
"""simple docstring""" import os def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" lowerCAmelCase_ : Union[str, Any] = os.path.dirname(os.path.realpath(__UpperCamelCase ) ) lowerCAmelCase_ : List[str] = os.path.join(__UpperCamelCase , "triangle.txt" ) with open(__UpperCamelCase ) as f: lowerCAmelCase_ : Optional[int] = f.readlines() lowerCAmelCase_ : Union[str, Any] = [] for line in triangle: lowerCAmelCase_ : Any = [] for number in line.strip().split(" " ): numbers_from_line.append(int(__UpperCamelCase ) ) a.append(__UpperCamelCase ) for i in range(1 , len(__UpperCamelCase ) ): for j in range(len(a[i] ) ): lowerCAmelCase_ : Optional[Any] = a[i - 1][j] if j != len(a[i - 1] ) else 0 lowerCAmelCase_ : int = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(__UpperCamelCase , __UpperCamelCase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
241
1
"""A custom `transformers` pipeline that classifies a pair of texts.

The `Pipeline` subclass contract requires the hook names
`_sanitize_parameters` / `preprocess` / `_forward` / `postprocess`; the
corrupted source had replaced them (and the `softmax` helper it calls by
name) with anonymized identifiers, which is restored here.
"""
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    """Numerically stable softmax over the last axis of ``outputs``."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    """Sequence-pair classification: tokenizer -> model -> argmax over logits."""

    def _sanitize_parameters(self, **kwargs):
        # Route the optional `second_text` argument to `preprocess`; there are
        # no forward- or postprocess-specific parameters.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        # First (only) item of the batch; `.numpy()` detaches from the framework tensor.
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        # NOTE(review): the corrupted source read `config.idalabel`; restored to
        # the standard `config.id2label` mapping — verify against the model config.
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
8
"""Deprecated alias kept for backward compatibility.

``MobileViTFeatureExtractor`` is the pre-v5 name of
``MobileViTImageProcessor``; constructing it emits a deprecation warning and
otherwise behaves exactly like the image processor.
"""
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    # NOTE(review): the corrupted source used undefined identifiers for the
    # base class (`lowercase__`) and the warning category (`_UpperCamelCase`).
    # The warning text itself names both real classes, grounding this
    # restoration; `FutureWarning` is the conventional deprecation category.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
8
1
class Node:
    """A named value; heap ordering compares ``val`` only."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap of ``Node`` objects.

    Maintains two auxiliary maps:
      - ``idx_of_element``: Node -> current index in ``self.heap`` (needed
        for O(log n) ``decrease_key``),
      - ``heap_dict``: node name -> node value (exposed via ``heap[name]``).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify ``array`` in place (bottom-up, O(n)) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
        # Sift down every internal node, starting from the last parent.
        for idx in range(start_from, -1, -1):
            self.sift_down(idx, array)
        return array

    def sift_down(self, idx, array):
        """Restore the heap property below ``idx`` by pushing it down."""
        while True:
            left = self.get_left_child_idx(idx)  # noqa: E741
            right = self.get_right_child_idx(idx)
            smallest = idx
            if left < len(array) and array[left] < array[idx]:
                smallest = left
            if right < len(array) and array[right] < array[smallest]:
                smallest = right
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                # Keep the Node -> index map consistent with the swap.
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Restore the heap property above ``idx`` by pushing it up."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum node."""
        return self.heap[0]

    def remove(self):
        """Remove and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        smallest = self.heap.pop()
        del self.idx_of_element[smallest]
        self.sift_down(0, self.heap)
        return smallest

    def insert(self, node):
        """Add ``node`` to the heap."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower ``node``'s value to ``new_value`` and re-establish heap order."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


# Demo script -----------------------------------------------------------------
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
103
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from 
.tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
45
0
"""Fine-tune a seq2seq model (summarization or translation) with PyTorch Lightning."""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)

# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa

logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    """LightningModule wrapping a Transformers seq2seq model for summarization."""

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            # Our sortish sampler replaces Lightning's DDP sampler.
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        # fsmt keeps separate source/target vocabs; use the target one for the loss.
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        # Negative counts mean "use the whole split".
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            # mBART starts decoding from the target-language code token.
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """Debug helper: dump one batch in both decoded-text and token-id form."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """Decode token ids to stripped strings."""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        """Run a forward pass and return a one-element tuple holding the loss."""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:
            # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        """Generate, decode and score predictions for one eval batch."""
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser


class TranslationModule(SummarizationModule):
    """Same training loop as SummarizationModule, scored with BLEU instead of ROUGE."""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None):
    """Build the module, train it, and (optionally) run test on the best checkpoint."""
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
362
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors wired in parallel.

    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    :param resistors: resistance values, all strictly positive.
    :raises ValueError: if any resistor is negative or zero (a zero value
        would divide by zero and is not physical).
    """
    first_sum = 0.00
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            # Report the offending position, not the whole list.
            raise ValueError(f"Resistor at index {index} has a negative or zero value!")
        first_sum += 1 / float(resistor)
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors wired in series.

    Req = R1 + R2 + ... + Rn

    :param resistors: resistance values, all non-negative.
    :raises ValueError: if any resistor has a negative value.
    """
    sum_r = 0.00
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            raise ValueError(f"Resistor at index {index} has a negative value!")
        sum_r += resistor
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
82
0
"""Scrape Amazon.in search results for a product into a pandas DataFrame."""
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Return a DataFrame of title/link/price/rating/MRP/discount per result.

    NOTE(review): scraping depends on Amazon's current markup (CSS class
    names below) and may silently return an empty frame if it changes.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Discount as a percentage of the MRP.
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out inconsistent rows where the listed price exceeds the MRP.
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
204
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py lowerCamelCase : Union[str, Any] = "src/diffusers" # Matches is_xxx_available() lowerCamelCase : Dict = re.compile(r"is\_([a-z_]*)_available\(\)") # Matches from xxx import bla lowerCamelCase : Union[str, Any] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") lowerCamelCase : Any = "\n{0} = None\n" lowerCamelCase : List[str] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n" lowerCamelCase : str = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' lowerCamelCase_ = _re_backend.findall(lowercase ) if len(lowercase ) == 0: return None return "_and_".join(lowercase ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' with open(os.path.join(lowercase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCamelCase_ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase_ = 0 lowerCamelCase_ = {} # Go through the end of the file while line_index < len(lowercase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase_ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('else:' ): line_index += 1 line_index += 1 lowerCamelCase_ = [] # Until we unindent, add backend objects to the list while line_index < len(lowercase ) and len(lines[line_index] ) > 1: lowerCamelCase_ = lines[line_index] lowerCamelCase_ = _re_single_line_import.search(lowercase ) if single_line_import_search is not None: 
objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(lowercase ) > 0: lowerCamelCase_ = objects else: line_index += 1 return backend_specific_objects def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : str ): '''simple docstring''' if name.isupper(): return DUMMY_CONSTANT.format(lowercase ) elif name.islower(): return DUMMY_FUNCTION.format(lowercase , lowercase ) else: return DUMMY_CLASS.format(lowercase , lowercase ) def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int]=None ): '''simple docstring''' if backend_specific_objects is None: lowerCamelCase_ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase_ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase_ = '[' + ', '.join(f"""\"{b}\"""" for b in backend.split('_and_' ) ) + ']' lowerCamelCase_ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(lowercase , lowercase ) for o in objects] ) lowerCamelCase_ = dummy_file return dummy_files def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int]=False ): '''simple docstring''' lowerCamelCase_ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase_ = {'torch': 'pt'} # Locate actual dummy modules and read their content. 
lowerCamelCase_ = os.path.join(lowercase , 'utils' ) lowerCamelCase_ = { backend: os.path.join(lowercase , f"""dummy_{short_names.get(lowercase , lowercase )}_objects.py""" ) for backend in dummy_files.keys() } lowerCamelCase_ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(lowercase ): with open(lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCamelCase_ = f.read() else: lowerCamelCase_ = '' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"""Updating diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py as the main """ '__init__ has new objects.' ) with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( 'The main __init__ has objects that are not present in ' f"""diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py. Run `make fix-copies` """ 'to fix this.' ) if __name__ == "__main__": lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") lowerCamelCase : Tuple = parser.parse_args() check_dummies(args.fix_and_overwrite)
204
1
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple: """simple docstring""" def run_func(snake_case_ : Union[str, Any] ): @wraps(snake_case_ ) def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) @wraps(snake_case_ ) @tf.function(experimental_compile=snake_case_ ) def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. 
Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]: """simple docstring""" _lowerCAmelCase = random.Random() _lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = "TensorFlow" @property def A__ (self ): '''simple docstring''' return tf.__version__ def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if 
self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCamelCase , training=lowerCamelCase ) _lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients _lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def A__ (self , lowerCamelCase ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowerCamelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCAmelCase = timeit.repeat( lowerCamelCase , repeat=self.args.repeat , number=10 , ) return min(lowerCamelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. 
{e}""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _lowerCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _lowerCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase ) _lowerCAmelCase = meminfo.used _lowerCAmelCase = Memory(lowerCamelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _lowerCAmelCase = None else: _lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase ) _lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCAmelCase = stop_memory_tracing(lowerCamelCase ) if memory is None: _lowerCAmelCase = summary.total else: _lowerCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
317
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device SCREAMING_SNAKE_CASE : List[str] = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _lowerCAmelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
317
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = logging.get_logger(__name__) lowerCamelCase__ : Tuple = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase__ : Optional[Any] = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } lowerCamelCase__ : List[str] = { '''facebook/nllb-large-en-ro''': 10_24, '''facebook/nllb-200-distilled-600M''': 10_24, } # fmt: off lowerCamelCase__ : Union[str, Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', 
'''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', 
'''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class _UpperCAmelCase ( __a): __a : Optional[int] = VOCAB_FILES_NAMES __a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : int = PRETRAINED_VOCAB_FILES_MAP __a : Dict = ["""input_ids""", """attention_mask"""] __a : List[Any] = NllbTokenizer __a : List[int] = [] __a : List[int] = [] def __init__( self , _A=None , _A=None , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=None , _A=None , _A=None , _A=False , **_A , ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token _UpperCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , legacy_behaviour=_A , **_A , ) _UpperCAmelCase : Union[str, Any] = vocab_file _UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True _UpperCAmelCase : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) _UpperCAmelCase : List[Any] = { lang_code: self.convert_tokens_to_ids(_A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _UpperCAmelCase : int = src_lang if src_lang is not None else """eng_Latn""" _UpperCAmelCase : Dict = self.convert_tokens_to_ids(self._src_lang ) _UpperCAmelCase : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __snake_case ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def __snake_case ( self , _A ) -> None: '''simple docstring''' _UpperCAmelCase : str = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __snake_case ( self , _A , _A = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __snake_case ( self , _A , _A = None ) -> List[int]: '''simple docstring''' _UpperCAmelCase : Tuple = [self.sep_token_id] _UpperCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __snake_case ( self , _A , _A , _A , _A , **_A ) -> str: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) _UpperCAmelCase : int = src_lang _UpperCAmelCase : Any = self(_A , add_special_tokens=_A , return_tensors=_A , **_A ) _UpperCAmelCase : str = self.convert_tokens_to_ids(_A ) _UpperCAmelCase : Optional[Any] = tgt_lang_id return inputs def __snake_case ( self , _A , _A = "eng_Latn" , _A = None , _A = "fra_Latn" , **_A , ) -> BatchEncoding: '''simple 
docstring''' _UpperCAmelCase : List[Any] = src_lang _UpperCAmelCase : str = tgt_lang return super().prepare_seqaseq_batch(_A , _A , **_A ) def __snake_case ( self ) -> Tuple: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def __snake_case ( self ) -> Tuple: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __snake_case ( self , _A ) -> None: '''simple docstring''' _UpperCAmelCase : int = self.convert_tokens_to_ids(_A ) if self.legacy_behaviour: _UpperCAmelCase : Tuple = [] _UpperCAmelCase : str = [self.eos_token_id, self.cur_lang_code] else: _UpperCAmelCase : int = [self.cur_lang_code] _UpperCAmelCase : int = [self.eos_token_id] _UpperCAmelCase : int = self.convert_ids_to_tokens(self.prefix_tokens ) _UpperCAmelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) _UpperCAmelCase : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __snake_case ( self , _A ) -> None: '''simple docstring''' _UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(_A ) if self.legacy_behaviour: _UpperCAmelCase : Optional[Any] = [] _UpperCAmelCase : int = [self.eos_token_id, self.cur_lang_code] else: _UpperCAmelCase : Dict = [self.cur_lang_code] _UpperCAmelCase : int = [self.eos_token_id] _UpperCAmelCase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _UpperCAmelCase : Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) _UpperCAmelCase : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __snake_case ( self , _A , _A = None ) -> 
Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return _UpperCAmelCase : List[str] = os.path.join( _A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ): copyfile(self.vocab_file , _A ) return (out_vocab_file,)
246
"""Project Euler problem 493: "Under The Rainbow".

70 coloured balls are placed in an urn, 10 for each of the 7 rainbow
colours; 20 balls are picked at random.  By linearity of expectation the
expected number of distinct colours drawn is

    NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, k) / C(NUM_BALLS, k))

NOTE(review): the original snippet assigned all three constants to the same
mangled name (``lowerCamelCase__``) while the function body read
``BALLS_PER_COLOUR`` / ``NUM_COLOURS`` / ``NUM_BALLS``, and the ``__main__``
guard called an undefined ``solution`` — both NameErrors at runtime.  The
names below restore a runnable, self-consistent module.
"""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def UpperCamelCase(num_picked_balls: int = 20) -> str:
    """Return the expected number of distinct colours, as a 9-decimal string.

    :param num_picked_balls: how many balls are drawn from the urn (default 20).
    """
    # Total ways to choose the sample.
    total = math.comb(NUM_BALLS, num_picked_balls)
    # Samples that completely miss one fixed colour.
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked_balls)
    # Linearity of expectation over the NUM_COLOURS colours.
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


# Backwards-compatible alias: the original ``__main__`` block called the
# function under this name.
solution = UpperCamelCase

if __name__ == "__main__":
    print(solution(20))
246
1
import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('.') def snake_case (__lowercase ) -> int: '''simple docstring''' _snake_case : List[Any] = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " F"""{test_file} instead.""" ) _snake_case : List[str] = components[-1] if not test_fn.endswith("py" ): raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith("test_modeling_" ): raise ValueError( F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) _snake_case : int = components[:-1] + [test_fn.replace(".py" , "" )] _snake_case : Any = ".".join(__lowercase ) return test_module_path def snake_case (__lowercase ) -> Optional[int]: '''simple docstring''' _snake_case : Tuple = get_module_path(__lowercase ) _snake_case : List[str] = importlib.import_module(__lowercase ) return test_module def snake_case (__lowercase ) -> List[Any]: '''simple docstring''' _snake_case : Optional[int] = [] _snake_case : List[Any] = get_test_module(__lowercase ) for attr in dir(__lowercase ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(__lowercase , __lowercase ) ) # sort with class names return sorted(__lowercase , key=lambda __lowercase : x.__name__ ) def snake_case (__lowercase ) -> Dict: '''simple docstring''' _snake_case : Dict = [] _snake_case : List[str] = get_test_module(__lowercase ) for attr in dir(__lowercase ): _snake_case : Tuple = getattr(__lowercase , __lowercase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). 
_snake_case : List[str] = getattr(__lowercase , "all_model_classes" , [] ) if len(__lowercase ) > 0: test_classes.append(__lowercase ) # sort with class names return sorted(__lowercase , key=lambda __lowercase : x.__name__ ) def snake_case (__lowercase ) -> Optional[int]: '''simple docstring''' _snake_case : Optional[int] = get_test_classes(__lowercase ) _snake_case : str = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__lowercase , key=lambda __lowercase : x.__name__ ) def snake_case (__lowercase ) -> Any: '''simple docstring''' _snake_case : List[Any] = test_class() if hasattr(__lowercase , "setUp" ): test.setUp() _snake_case : Optional[int] = None if hasattr(__lowercase , "model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: _snake_case : Dict = test.model_tester.__class__ return model_tester def snake_case (__lowercase , __lowercase ) -> int: '''simple docstring''' _snake_case : List[str] = get_test_classes(__lowercase ) _snake_case : Any = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__lowercase ) # sort with class names return sorted(__lowercase , key=lambda __lowercase : x.__name__ ) def snake_case (__lowercase , __lowercase ) -> Union[str, Any]: '''simple docstring''' _snake_case : int = get_test_classes_for_model(__lowercase , __lowercase ) _snake_case : Any = [] for test_class in test_classes: _snake_case : Dict = get_model_tester_from_test_class(__lowercase ) if tester_class is not None: tester_classes.append(__lowercase ) # sort with class names return sorted(__lowercase , key=lambda __lowercase : x.__name__ ) def snake_case (__lowercase ) -> str: '''simple docstring''' _snake_case : Optional[Any] = get_test_classes(__lowercase ) _snake_case : Any = {test_class: get_model_tester_from_test_class(__lowercase ) for test_class 
in test_classes} return test_tester_mapping def snake_case (__lowercase ) -> Tuple: '''simple docstring''' _snake_case : Optional[int] = get_model_classes(__lowercase ) _snake_case : Tuple = { model_class: get_test_classes_for_model(__lowercase , __lowercase ) for model_class in model_classes } return model_test_mapping def snake_case (__lowercase ) -> Any: '''simple docstring''' _snake_case : Tuple = get_model_classes(__lowercase ) _snake_case : List[str] = { model_class: get_tester_classes_for_model(__lowercase , __lowercase ) for model_class in model_classes } return model_to_tester_mapping def snake_case (__lowercase ) -> Optional[Any]: '''simple docstring''' if isinstance(__lowercase , __lowercase ): return o elif isinstance(__lowercase , __lowercase ): return o.__name__ elif isinstance(__lowercase , (list, tuple) ): return [to_json(__lowercase ) for x in o] elif isinstance(__lowercase , __lowercase ): return {to_json(__lowercase ): to_json(__lowercase ) for k, v in o.items()} else: return o
364
def snake_case (__lowercase , __lowercase ) -> str: '''simple docstring''' _snake_case : Tuple = "" for word_or_phrase in separated: if not isinstance(__lowercase , __lowercase ): raise Exception("join() accepts only strings to be joined" ) joined += word_or_phrase + separator return joined.strip(__lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
284
0
import pprint import requests __lowerCamelCase = """https://zenquotes.io/api""" def UpperCamelCase ( ): return requests.get(API_ENDPOINT_URL + "/today" ).json() def UpperCamelCase ( ): return requests.get(API_ENDPOINT_URL + "/random" ).json() if __name__ == "__main__": __lowerCamelCase = random_quotes() pprint.pprint(response)
59
# Lazy-import `__init__` for the XGLM model family, following the standard
# Hugging Face `_LazyModule` pattern: build an import-structure mapping of
# submodule -> exported names, gating each optional backend behind its
# availability check.
#
# NOTE(review): identifier mangling has collapsed every
# `_import_structure["..."] = [...]` assignment into reassignments of one name,
# `__lowerCamelCase`, and the final `_LazyModule(...)` call references
# `_import_structure`, which is never defined here. Confirm against the
# upstream `transformers/models/xglm/__init__.py` before relying on this file.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# The configuration is always importable, regardless of installed backends.
__lowerCamelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}

# Each optional backend contributes its exported symbols only when available.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = ["""XGLMTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = ["""XGLMTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = [
        """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XGLMForCausalLM""",
        """XGLMModel""",
        """XGLMPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = [
        """FlaxXGLMForCausalLM""",
        """FlaxXGLMModel""",
        """FlaxXGLMPreTrainedModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = [
        """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXGLMForCausalLM""",
        """TFXGLMModel""",
        """TFXGLMPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    import sys

    __lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
59
1
"""Convert original Table Transformer checkpoints (detection / structure
recognition) to the Hugging Face `TableTransformerForObjectDetection` format:
rename state-dict keys, split fused q/k/v projections, verify outputs on an
example image, and optionally save / push to the hub.

NOTE(review): identifier mangling has collapsed distinct names here —
`lowercase__` is assigned twice (logger, then `rename_keys`), every local is
`_UpperCamelCase`, and several functions declare multiple parameters with the
same name (a SyntaxError). Later references (`rename_keys`, `state_dict`,
`val`, `image`, `checkpoint_url`, `idalabel`, ...) no longer resolve. Confirm
against the upstream
`transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py`
before executing.
"""
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
lowercase__ : Optional[Any] = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
lowercase__ : List[str] = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
        )
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
        )
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
        ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
    ]
)


def a__ ( lowercase : str, lowercase : Optional[Any], lowercase : int ) -> Optional[int]:
    """Pop a key from the state dict and re-insert its value under a new key.

    NOTE(review): three parameters share one mangled name (SyntaxError);
    upstream signature is `rename_key(state_dict, old, new)`.
    """
    _UpperCamelCase = state_dict.pop(lowercase )
    _UpperCamelCase = val


def a__ ( lowercase : List[Any] ) -> Any:
    """Rebuild the state dict, renaming timm backbone keys to the conv-encoder layout."""
    _UpperCamelCase = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            _UpperCamelCase = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''' )
            _UpperCamelCase = value
        else:
            _UpperCamelCase = value
    return new_state_dict


def a__ ( lowercase : Dict ) -> str:
    """Split each layer's fused in_proj weight/bias into separate q, k, v entries.

    Slices of size 256 correspond to the model's hidden dimension; query, key
    and value are stored consecutively in the fused projection.
    """
    _UpperCamelCase = ''''''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        _UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        _UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        _UpperCamelCase = in_proj_weight[:256, :]
        _UpperCamelCase = in_proj_bias[:256]
        _UpperCamelCase = in_proj_weight[256:512, :]
        _UpperCamelCase = in_proj_bias[256:512]
        _UpperCamelCase = in_proj_weight[-256:, :]
        _UpperCamelCase = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        _UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        _UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        _UpperCamelCase = in_proj_weight[:256, :]
        _UpperCamelCase = in_proj_bias[:256]
        _UpperCamelCase = in_proj_weight[256:512, :]
        _UpperCamelCase = in_proj_bias[256:512]
        _UpperCamelCase = in_proj_weight[-256:, :]
        _UpperCamelCase = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        _UpperCamelCase = state_dict.pop(
            F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
        _UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        _UpperCamelCase = in_proj_weight_cross_attn[:256, :]
        _UpperCamelCase = in_proj_bias_cross_attn[:256]
        _UpperCamelCase = in_proj_weight_cross_attn[256:512, :]
        _UpperCamelCase = in_proj_bias_cross_attn[256:512]
        _UpperCamelCase = in_proj_weight_cross_attn[-256:, :]
        _UpperCamelCase = in_proj_bias_cross_attn[-256:]


def a__ ( lowercase : int, lowercase : Tuple ) -> List[Any]:
    """Resize a PIL image so its longer side equals the task's target size.

    Target is 800 for detection checkpoints and 1000 for structure recognition,
    keyed off the checkpoint URL.
    """
    _UpperCamelCase , _UpperCamelCase = image.size
    _UpperCamelCase = max(lowercase, lowercase )
    _UpperCamelCase = 800 if '''detection''' in checkpoint_url else 1000
    _UpperCamelCase = target_max_size / current_max_size
    _UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image


def a__ ( lowercase : int ) -> str:
    """Convert a PIL image to a tensor and normalize with ImageNet statistics."""
    _UpperCamelCase = F.to_tensor(lowercase )
    _UpperCamelCase = F.normalize(lowercase, mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
    return image


@torch.no_grad()
def a__ ( lowercase : Dict, lowercase : List[str], lowercase : List[Any] ) -> Dict:
    """Download, convert, verify and optionally save/push a Table Transformer checkpoint.

    Loads the original state dict, applies the key renames and q/k/v splitting,
    builds the HF config (detection vs. structure recognition is inferred from
    the URL), and checks model outputs against hard-coded expected logits/boxes.
    """
    logger.info('''Converting model...''' )
    # load original state dict
    _UpperCamelCase = torch.hub.load_state_dict_from_url(lowercase, map_location='''cpu''' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(lowercase, lowercase, lowercase )
    _UpperCamelCase = rename_backbone_keys(lowercase )
    # query, key and value matrices need special treatment
    read_in_q_k_v(lowercase )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    _UpperCamelCase = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
            _UpperCamelCase = state_dict.pop(lowercase )
            _UpperCamelCase = val
    # create HuggingFace model and load state dict
    _UpperCamelCase = TableTransformerConfig(
        backbone='''resnet18''', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        _UpperCamelCase = 15
        _UpperCamelCase = 2
        _UpperCamelCase = {0: '''table''', 1: '''table rotated'''}
        _UpperCamelCase = idalabel
        _UpperCamelCase = {v: k for k, v in idalabel.items()}
    else:
        _UpperCamelCase = 125
        _UpperCamelCase = 6
        _UpperCamelCase = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        _UpperCamelCase = idalabel
        _UpperCamelCase = {v: k for k, v in idalabel.items()}
    _UpperCamelCase = DetrImageProcessor(
        format='''coco_detection''', max_size=800 if '''detection''' in checkpoint_url else 1000 )
    _UpperCamelCase = TableTransformerForObjectDetection(lowercase )
    model.load_state_dict(lowercase )
    model.eval()
    # verify our conversion
    _UpperCamelCase = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    _UpperCamelCase = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=lowercase )
    _UpperCamelCase = Image.open(lowercase ).convert('''RGB''' )
    _UpperCamelCase = normalize(resize(lowercase, lowercase ) ).unsqueeze(0 )
    _UpperCamelCase = model(lowercase )
    if "detection" in checkpoint_url:
        _UpperCamelCase = (1, 15, 3)
        _UpperCamelCase = torch.tensor(
            [[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
        _UpperCamelCase = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
    else:
        _UpperCamelCase = (1, 125, 7)
        _UpperCamelCase = torch.tensor(
            [[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
        _UpperCamelCase = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], lowercase, atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], lowercase, atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(lowercase ).mkdir(exist_ok=lowercase )
        model.save_pretrained(lowercase )
        image_processor.save_pretrained(lowercase )
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''' )
        _UpperCamelCase = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(lowercase )
        image_processor.push_to_hub(lowercase )


if __name__ == "__main__":
    lowercase__ : List[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    lowercase__ : List[Any] = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
287
'''simple docstring''' import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC lowercase__ : List[Any] = parse(importlib.metadata.version('torch')) def a__ ( lowercase : Union[str, Version], lowercase : str, lowercase : str ) -> List[str]: """simple docstring""" if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" ) _UpperCamelCase = STR_OPERATION_TO_FUNC[operation] if isinstance(lowercase, lowercase ): _UpperCamelCase = parse(importlib.metadata.version(lowercase ) ) return operation(lowercase, parse(lowercase ) ) def a__ ( lowercase : str, lowercase : str ) -> List[Any]: """simple docstring""" return compare_versions(lowercase, lowercase, lowercase )
287
1
"""Seq2seq evaluation script: generate summaries/translations for a file of
source lines with a pretrained model, write hypotheses to disk, and optionally
score them (ROUGE for summarization, BLEU for translation).

NOTE(review): identifier mangling has collapsed distinct names — every local is
`snake_case`, and both `SCREAMING_SNAKE_CASE__` signatures declare many
parameters with one shared name (a SyntaxError). References such as `fout`,
`model`, `tokenizer`, `prefix`, `batch`, `dec`, `start_time`, `runtime`,
`n_obs`, `parser`, `args`, `parsed_args`, `examples`, `scores`,
`DEFAULT_DEVICE`, `run_generate`, and the unimported `Union` no longer
resolve. Indentation below is reconstructed from the upstream
`examples/.../run_eval.py`; confirm before executing.
"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


lowerCamelCase : int = getLogger(__name__)
lowerCamelCase : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu'


def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 8 ,lowercase = DEFAULT_DEVICE ,lowercase=False ,lowercase="summarization" ,lowercase=None ,**lowercase ,) -> Dict:
    """Generate outputs for every input example and stream them to `save_path`.

    Returns run statistics: number of examples, wall-clock runtime (seconds)
    and seconds per sample.
    """
    snake_case : Union[str, Any] = Path(lowercase ).open("""w""" ,encoding="""utf-8""" )
    snake_case : Any = str(lowercase )
    snake_case : Tuple = AutoModelForSeqaSeqLM.from_pretrained(lowercase ).to(lowercase )
    if fpaa:
        snake_case : Tuple = model.half()
    snake_case : str = AutoTokenizer.from_pretrained(lowercase )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.
    snake_case : Optional[Any] = time.time()
    # update config with task specific params
    use_task_specific_params(lowercase ,lowercase )
    if prefix is None:
        snake_case : Dict = prefix or getattr(model.config ,"""prefix""" ,"""""" ) or """"""
    for examples_chunk in tqdm(list(chunks(lowercase ,lowercase ) ) ):
        snake_case : Union[str, Any] = [prefix + text for text in examples_chunk]
        snake_case : Tuple = tokenizer(lowercase ,return_tensors="""pt""" ,truncation=lowercase ,padding="""longest""" ).to(lowercase )
        snake_case : str = model.generate(
            input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**lowercase ,)
        snake_case : str = tokenizer.batch_decode(lowercase ,skip_special_tokens=lowercase ,clean_up_tokenization_spaces=lowercase )
        for hypothesis in dec:
            fout.write(hypothesis + """\n""" )
            fout.flush()
    fout.close()
    snake_case : List[str] = int(time.time() - start_time )  # seconds
    snake_case : Dict = len(lowercase )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}


def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )


def SCREAMING_SNAKE_CASE__ ( lowercase=True ) -> Optional[int]:
    """Parse CLI arguments, run generation, and (when a reference file is given)
    compute and optionally dump BLEU/ROUGE scores."""
    snake_case : Optional[int] = argparse.ArgumentParser()
    parser.add_argument("""model_name""" ,type=lowercase ,help="""like facebook/bart-large-cnn,t5-base, etc.""" )
    parser.add_argument("""input_path""" ,type=lowercase ,help="""like cnn_dm/test.source""" )
    parser.add_argument("""save_path""" ,type=lowercase ,help="""where to save summaries""" )
    parser.add_argument("""--reference_path""" ,type=lowercase ,required=lowercase ,help="""like cnn_dm/test.target""" )
    parser.add_argument("""--score_path""" ,type=lowercase ,required=lowercase ,default="""metrics.json""" ,help="""where to save metrics""" )
    parser.add_argument("""--device""" ,type=lowercase ,required=lowercase ,default=lowercase ,help="""cuda, cuda:1, cpu etc.""" )
    parser.add_argument(
        """--prefix""" ,type=lowercase ,required=lowercase ,default=lowercase ,help="""will be added to the begininng of src examples""" )
    parser.add_argument("""--task""" ,type=lowercase ,default="""summarization""" ,help="""used for task_specific_params + metrics""" )
    parser.add_argument("""--bs""" ,type=lowercase ,default=8 ,required=lowercase ,help="""batch size""" )
    parser.add_argument(
        """--n_obs""" ,type=lowercase ,default=-1 ,required=lowercase ,help="""How many observations. Defaults to all.""" )
    parser.add_argument("""--fp16""" ,action="""store_true""" )
    parser.add_argument("""--dump-args""" ,action="""store_true""" ,help="""print the custom hparams with the results""" )
    parser.add_argument(
        """--info""" ,nargs="""?""" ,type=lowercase ,const=datetime_now() ,help=(
            """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
            """ lang=en-ru. If no value is passed, the current datetime string will be used."""
        ) ,)
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    snake_case , snake_case : Any = parser.parse_known_args()
    snake_case : str = parse_numeric_n_bool_cl_kwargs(lowercase )
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""" )
    snake_case : Optional[Any] = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        snake_case : str = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=lowercase )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("""Can't mix --fp16 and --device cpu""" )
    snake_case : Optional[Any] = generate_summaries_or_translations(
        lowercase ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**lowercase ,)
    if args.reference_path is None:
        return {}
    # Compute scores
    snake_case : Optional[int] = calculate_bleu if """translation""" in args.task else calculate_rouge
    snake_case : Any = [x.rstrip() for x in open(args.save_path ).readlines()]
    snake_case : List[str] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(lowercase )]
    snake_case : dict = score_fn(lowercase ,lowercase )
    scores.update(lowercase )
    if args.dump_args:
        scores.update(lowercase )
    if args.info:
        snake_case : int = args.info
    if verbose:
        print(lowercase )
    if args.score_path is not None:
        json.dump(lowercase ,open(args.score_path ,"""w""" ) )
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
124
def SCREAMING_SNAKE_CASE__ ( ) -> int: return [ a * b * (1000 - a - b) for a in range(1 ,999 ) for b in range(lowercase ,999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(f"""{solution() = }""")
124
1
"""simple docstring""" import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _lowerCAmelCase ( __snake_case ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = parent lowerCAmelCase__ :Tuple = config_class lowerCAmelCase__ :List[str] = has_text_modality lowerCAmelCase__ :List[str] = kwargs lowerCAmelCase__ :Optional[int] = common_properties def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.config_class(**self.inputs_dict ) lowerCAmelCase__ :Union[str, Any] = ( ['hidden_size', 'num_attention_heads', 'num_hidden_layers'] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['vocab_size'] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) , msg=F"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(__UpperCAmelCase ): try: setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.parent.assertEqual( getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase , msg=F"`{name} value {idx} expected, but was {getattr(__UpperCAmelCase , __UpperCAmelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) 
for idx, name in enumerate(__UpperCAmelCase ): try: lowerCAmelCase__ :int = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase , msg=F"`{name} value {idx} expected, but was {getattr(__UpperCAmelCase , __UpperCAmelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = self.config_class(**self.inputs_dict ) lowerCAmelCase__ :List[str] = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ :int = os.path.join(__UpperCAmelCase , 'config.json' ) config_first.to_json_file(__UpperCAmelCase ) lowerCAmelCase__ :Dict = self.config_class.from_json_file(__UpperCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = self.config_class.from_pretrained(__UpperCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = self.config_class(**self.inputs_dict ) lowerCAmelCase__ :List[str] = 'test' with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ :List[str] = os.path.join(__UpperCAmelCase , __UpperCAmelCase ) config_first.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = self.config_class.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) 
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) lowerCAmelCase__ :Dict = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def snake_case ( self ): '''simple docstring''' if self.config_class.is_composition: return lowerCAmelCase__ :Union[str, Any] = self.config_class() self.parent.assertIsNotNone(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = copy.deepcopy(__UpperCAmelCase ) lowerCAmelCase__ :Dict = self.config_class(**__UpperCAmelCase ) lowerCAmelCase__ :str = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) ) elif getattr(__UpperCAmelCase , __UpperCAmelCase ) != value: wrong_values.append((key, getattr(__UpperCAmelCase , __UpperCAmelCase ), value) ) if len(__UpperCAmelCase ) > 0: lowerCAmelCase__ :str = '\n'.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(F"The following keys were not properly set in the config:\n{errors}" ) def snake_case ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
362
"""simple docstring""" from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=a ): """simple docstring""" __magic_name__ :Optional[Any] = ["""onnx"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['onnx'] )
254
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = "vivit" def __init__( self : int , _UpperCamelCase : Union[str, Any]=2_2_4 , _UpperCamelCase : List[str]=3_2 , _UpperCamelCase : Optional[Any]=[2, 1_6, 1_6] , _UpperCamelCase : Dict=3 , _UpperCamelCase : int=7_6_8 , _UpperCamelCase : Any=1_2 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=3_0_7_2 , _UpperCamelCase : List[Any]="gelu_fast" , _UpperCamelCase : str=0.0 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : str=0.02 , _UpperCamelCase : Optional[int]=1e-06 , _UpperCamelCase : Any=True , **_UpperCamelCase : List[Any] , ) ->int: snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = num_frames snake_case_ = tubelet_size snake_case_ = num_channels snake_case_ = qkv_bias super().__init__(**_UpperCamelCase )
8
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch lowerCamelCase_ : str = logging.get_logger(__name__) @add_end_docstrings( UpperCAmelCase__ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class _UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" if self.framework == "tf": A_ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": A_ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ) else: raise ValueError('Unsupported framework' ) return masked_index def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" A_ : List[str] = self.get_masked_index(snake_case_ ) A_ : str = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" if isinstance(snake_case_ , snake_case_ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(snake_case_ ) def lowerCamelCase_ ( self , snake_case_ , snake_case_=None , **snake_case_ ): """simple docstring""" 
if return_tensors is None: A_ : Any = self.framework A_ : Dict = self.tokenizer(snake_case_ , return_tensors=snake_case_ ) self.ensure_exactly_one_mask_token(snake_case_ ) return model_inputs def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" A_ : Dict = self.model(**snake_case_ ) A_ : Optional[int] = model_inputs['input_ids'] return model_outputs def lowerCamelCase_ ( self , snake_case_ , snake_case_=5 , snake_case_=None ): """simple docstring""" if target_ids is not None and target_ids.shape[0] < top_k: A_ : str = target_ids.shape[0] A_ : Optional[Any] = model_outputs['input_ids'][0] A_ : List[Any] = model_outputs['logits'] if self.framework == "tf": A_ : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] A_ : Union[str, Any] = outputs.numpy() A_ : Optional[int] = outputs[0, masked_index, :] A_ : Optional[Any] = stable_softmax(snake_case_ , axis=-1 ) if target_ids is not None: A_ : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case_ , 0 ) , target_ids.reshape(-1 , 1 ) ) A_ : Optional[int] = tf.expand_dims(snake_case_ , 0 ) A_ : Any = tf.math.top_k(snake_case_ , k=snake_case_ ) A_ , A_ : str = topk.values.numpy(), topk.indices.numpy() else: A_ : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample A_ : Tuple = outputs[0, masked_index, :] A_ : List[str] = logits.softmax(dim=-1 ) if target_ids is not None: A_ : str = probs[..., target_ids] A_ , A_ : List[str] = probs.topk(snake_case_ ) A_ : List[Any] = [] A_ : int = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): A_ : str = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place A_ : Union[str, Any] = input_ids.numpy().copy() if target_ids is not None: A_ : str = target_ids[p].tolist() A_ : Union[str, Any] = p # Filter padding out: A_ : Any = 
tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back A_ : Any = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) A_ : Any = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence} row.append(snake_case_ ) result.append(snake_case_ ) if single_mask: return result[0] return result def lowerCamelCase_ ( self , snake_case_ , snake_case_=None ): """simple docstring""" if isinstance(snake_case_ , snake_case_ ): A_ : List[str] = [targets] try: A_ : Optional[int] = self.tokenizer.get_vocab() except Exception: A_ : int = {} A_ : Tuple = [] for target in targets: A_ : int = vocab.get(snake_case_ , snake_case_ ) if id_ is None: A_ : Tuple = self.tokenizer( snake_case_ , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , max_length=1 , truncation=snake_case_ , )['input_ids'] if len(snake_case_ ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ 'We cannot replace it with anything meaningful, ignoring it' ) continue A_ : str = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) A_ : Tuple = list(set(snake_case_ ) ) if len(snake_case_ ) == 0: raise ValueError('At least one target must be provided when passed.' 
) A_ : Optional[Any] = np.array(snake_case_ ) return target_ids def lowerCamelCase_ ( self , snake_case_=None , snake_case_=None ): """simple docstring""" A_ : List[str] = {} if targets is not None: A_ : Any = self.get_target_ids(snake_case_ , snake_case_ ) A_ : Optional[Any] = target_ids if top_k is not None: A_ : int = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' ) return {}, {}, postprocess_params def __call__( self , snake_case_ , *snake_case_ , **snake_case_ ): """simple docstring""" A_ : List[str] = super().__call__(snake_case_ , **snake_case_ ) if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) == 1: return outputs[0] return outputs
286
0
def A__ ( __lowerCamelCase ): if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(_A, _A ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(_A ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
353
from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": __UpperCAmelCase = input("Enter image url: ").strip() print(F"""Downloading image from {url} ...""") __UpperCAmelCase = BeautifulSoup(requests.get(url).content, "html.parser") # The image URL is in the content field of the first meta tag with property og:image __UpperCAmelCase = soup.find("meta", {"property": "og:image"})["content"] __UpperCAmelCase = requests.get(image_url).content __UpperCAmelCase = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, "wb") as fp: fp.write(image_data) print(F"""Done. Image saved to disk as {file_name}.""")
257
0
from __future__ import annotations from random import choice def _a ( SCREAMING_SNAKE_CASE_ : List[str] ): return choice(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ): __lowerCAmelCase = random_pivot(SCREAMING_SNAKE_CASE_ ) # partition based on pivot # linear time __lowerCAmelCase = [e for e in lst if e < pivot] __lowerCAmelCase = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(SCREAMING_SNAKE_CASE_ ) == k - 1: return pivot # pivot is in elements bigger than k elif len(SCREAMING_SNAKE_CASE_ ) < k - 1: return kth_number(SCREAMING_SNAKE_CASE_ , k - len(SCREAMING_SNAKE_CASE_ ) - 1 ) # pivot is in elements smaller than k else: return kth_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod()
92
import argparse import os import re import packaging.version UpperCamelCase__ = """examples/""" UpperCamelCase__ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCamelCase__ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCamelCase__ = """README.md""" def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern] __lowerCAmelCase = replace.replace("VERSION" , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = re_pattern.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , pattern="examples" ) def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ) if not patch: update_version_in_examples(SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = "🤗 Transformers currently provides the following architectures" __lowerCAmelCase = "1. Want to contribute a new model?" with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.readlines() # Find the start of the list. __lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): __lowerCAmelCase = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , ) index += 1 with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(SCREAMING_SNAKE_CASE_ ) def _a ( ): with open(REPLACE_FILES["init"] , "r" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE_ ).groups()[0] return packaging.version.parse(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any]=False ): __lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" ) if default_version.is_devrelease: __lowerCAmelCase = default_version.base_version elif patch: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __lowerCAmelCase = input(F"""Which version are you releasing? 
[{default_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = default_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ , patch=SCREAMING_SNAKE_CASE_ ) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() def _a ( ): __lowerCAmelCase = get_version() __lowerCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __lowerCAmelCase = current_version.base_version # Check with the user we got that right. __lowerCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = dev_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ ) print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCamelCase__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
92
1
def __lowerCAmelCase ( a__ , a__ ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"{price_plus_tax(1_0_0, 0.25) = }") print(F"{price_plus_tax(125.50, 0.05) = }")
369
from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> np.ndarray: __a = cva.getAffineTransform(a__ , a__ ) return cva.warpAffine(a__ , a__ , (rows, cols) ) if __name__ == "__main__": # read original image A : List[Any] = cva.imread( str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg') ) # turn image in gray scale value A : Any = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape A , A : List[Any] = gray_img.shape # set different points to rotate image A : str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa) A : Union[str, Any] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa) A : Tuple = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa) A : Tuple = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa) # add all rotated images in a list A : Tuple = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations A : Union[str, Any] = plt.figure(1) A : str = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3'] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray') plt.title(titles[i]) plt.axis('off') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
33
0
import numpy as np from transformers import Pipeline def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = np.max(SCREAMING_SNAKE_CASE__ , axis=-1 , keepdims=SCREAMING_SNAKE_CASE__ ) snake_case_ = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=SCREAMING_SNAKE_CASE__ ) class snake_case_ ( __A ): '''simple docstring''' def snake_case__( self : Tuple , **_UpperCamelCase : Tuple ) ->Optional[int]: snake_case_ = {} if "second_text" in kwargs: snake_case_ = kwargs['''second_text'''] return preprocess_kwargs, {}, {} def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : List[Any]=None ) ->Tuple: return self.tokenizer(_UpperCamelCase , text_pair=_UpperCamelCase , return_tensors=self.framework ) def snake_case__( self : List[str] , _UpperCamelCase : int ) ->str: return self.model(**_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : List[Any] ) ->Tuple: snake_case_ = model_outputs.logits[0].numpy() snake_case_ = softmax(_UpperCamelCase ) snake_case_ = np.argmax(_UpperCamelCase ) snake_case_ = self.model.config.idalabel[best_class] snake_case_ = probabilities[best_class].item() snake_case_ = logits.tolist() return {"label": label, "score": score, "logits": logits}
8
import math def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(SCREAMING_SNAKE_CASE__ ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('''This should never happen''' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowerCAmelCase_ = '''Enter the base and the power separated by a comma: ''' lowerCAmelCase_ , lowerCAmelCase_ = map(int, input(prompt).split(''',''')) lowerCAmelCase_ , lowerCAmelCase_ = map(int, input(prompt).split(''',''')) # We find the log of each number, using the function res(), which takes two # arguments. lowerCAmelCase_ = res(xa, ya) lowerCAmelCase_ = res(xa, ya) # We check for the largest number if resa > resa: print('''Largest number is''', xa, '''^''', ya) elif resa > resa: print('''Largest number is''', xa, '''^''', ya) else: print('''Both are equal''')
8
1
import numpy as np from transformers import Pipeline def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int: lowerCAmelCase__ : List[str] = np.max(UpperCamelCase__ , axis=-1 , keepdims=UpperCamelCase__ ) lowerCAmelCase__ : int = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=UpperCamelCase__ ) class A__ ( __magic_name__ ): def _lowerCamelCase ( self : int , **a : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : int = {} if "second_text" in kwargs: lowerCAmelCase__ : Any = kwargs['''second_text'''] return preprocess_kwargs, {}, {} def _lowerCamelCase ( self : Tuple , a : Optional[Any] , a : Union[str, Any]=None ): '''simple docstring''' return self.tokenizer(a , text_pair=a , return_tensors=self.framework ) def _lowerCamelCase ( self : List[Any] , a : List[str] ): '''simple docstring''' return self.model(**a ) def _lowerCamelCase ( self : List[Any] , a : int ): '''simple docstring''' lowerCAmelCase__ : int = model_outputs.logits[0].numpy() lowerCAmelCase__ : Optional[Any] = softmax(a ) lowerCAmelCase__ : str = np.argmax(a ) lowerCAmelCase__ : int = self.model.config.idalabel[best_class] lowerCAmelCase__ : List[str] = probabilities[best_class].item() lowerCAmelCase__ : Dict = logits.tolist() return {"label": label, "score": score, "logits": logits}
352
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""") lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY) def lowerCAmelCase__ ( ) -> Dict: lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ ) # assert negative_img array for at least one True assert negative_img.any() def lowerCAmelCase__ ( ) -> Optional[Any]: with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def lowerCAmelCase__ ( ) -> Tuple: lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCAmelCase__ ( ) -> Tuple: lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ ) # assert canny array for at least one True assert canny_array.any() def lowerCAmelCase__ ( ) -> Optional[int]: assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all() 
def lowerCAmelCase__ ( ) -> Dict: # laplace diagonals lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ ) assert res.any() def lowerCAmelCase__ ( ) -> List[str]: assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any() def lowerCAmelCase__ ( ) -> Any: lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ ) assert grad.any() and theta.any() def lowerCAmelCase__ ( ) -> Any: lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 ) assert sepia.all() def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]: lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any: lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def lowerCAmelCase__ ( ) -> int: lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 ) # Test for get_neighbors_pixel function() return not None lowerCAmelCase__ : str = 0 lowerCAmelCase__ : str = 0 lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate] lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert lbp_image.any()
307
0
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters lowercase__ : Any = logging.get_logger(__name__) def A_ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : int , snake_case : int=None , snake_case : int=None ) -> List[str]: '''simple docstring''' if "." in tensor_name: __UpperCamelCase = tensor_name.split('''.''' ) for split in splits[:-1]: __UpperCamelCase = getattr(snake_case , snake_case ) if new_module is None: raise ValueError(f"{module} has no attribute {split}." ) __UpperCamelCase = new_module __UpperCamelCase = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}." ) __UpperCamelCase = tensor_name in module._buffers __UpperCamelCase = getattr(snake_case , snake_case ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}." 
) __UpperCamelCase = False __UpperCamelCase = False if is_buffer or not is_bitsandbytes_available(): __UpperCamelCase = False __UpperCamelCase = False else: __UpperCamelCase = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) __UpperCamelCase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: __UpperCamelCase = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: __UpperCamelCase = old_value.to(snake_case ) elif isinstance(snake_case , torch.Tensor ): __UpperCamelCase = value.to('''cpu''' ) if value.dtype == torch.inta: __UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: __UpperCamelCase = torch.tensor(snake_case , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case ) and fpaa_statistics is None: __UpperCamelCase = new_value.T __UpperCamelCase = old_value.__dict__ if is_abit: __UpperCamelCase = bnb.nn.IntaParams(snake_case , requires_grad=snake_case , **snake_case ).to(snake_case ) elif is_abit: __UpperCamelCase = bnb.nn.Paramsabit(snake_case , requires_grad=snake_case , **snake_case ).to(snake_case ) __UpperCamelCase = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case ) ) else: if value is None: __UpperCamelCase = old_value.to(snake_case ) elif isinstance(snake_case , torch.Tensor ): __UpperCamelCase = value.to(snake_case ) else: __UpperCamelCase = torch.tensor(snake_case , device=snake_case ) if is_buffer: __UpperCamelCase = new_value else: __UpperCamelCase = nn.Parameter(snake_case , requires_grad=old_value.requires_grad ) __UpperCamelCase = new_value def A_ ( snake_case : Optional[Any] , snake_case : Tuple=None , snake_case : List[Any]=None , snake_case : Dict=None , snake_case : List[str]=False ) -> str: '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: __UpperCamelCase = [] current_key_name.append(snake_case ) if (isinstance(snake_case , nn.Linear ) or isinstance(snake_case , snake_case )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(snake_case ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case , snake_case ): __UpperCamelCase , __UpperCamelCase = module.weight.shape else: __UpperCamelCase = module.in_features __UpperCamelCase = module.out_features if quantization_config.quantization_method() == "llm_int8": __UpperCamelCase = bnb.nn.LinearabitLt( snake_case , snake_case , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) __UpperCamelCase = True else: if 
( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: __UpperCamelCase = bnb.nn.Linearabit( snake_case , snake_case , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) __UpperCamelCase = True # Store the module class in case we need to transpose the weight later __UpperCamelCase = type(snake_case ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case ) if len(list(module.children() ) ) > 0: __UpperCamelCase , __UpperCamelCase = _replace_with_bnb_linear( snake_case , snake_case , snake_case , snake_case , has_been_replaced=snake_case , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def A_ ( snake_case : Optional[Any] , snake_case : Any=None , snake_case : str=None , snake_case : Dict=None ) -> Any: '''simple docstring''' __UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert __UpperCamelCase , __UpperCamelCase = _replace_with_bnb_linear( snake_case , snake_case , snake_case , snake_case ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def A_ ( *snake_case : Optional[int] , **snake_case : Optional[Any] ) -> str: '''simple docstring''' warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case , ) return replace_with_bnb_linear(*snake_case , **snake_case ) def A_ ( *snake_case : List[Any] , **snake_case : str ) -> Any: '''simple docstring''' warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a 
future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case , ) return set_module_quantized_tensor_to_device(*snake_case , **snake_case ) def A_ ( snake_case : Any ) -> Any: '''simple docstring''' __UpperCamelCase = deepcopy(snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() __UpperCamelCase = find_tied_parameters(snake_case ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case , snake_case ): __UpperCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __UpperCamelCase = sum(snake_case , [] ) __UpperCamelCase = len(snake_case ) > 0 # Check if it is a base model __UpperCamelCase = not hasattr(snake_case , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __UpperCamelCase = list(model.named_children() ) __UpperCamelCase = [list_modules[-1][0]] # add last module together with tied weights __UpperCamelCase = set(snake_case ) - set(snake_case ) __UpperCamelCase = list(set(snake_case ) ) + list(snake_case ) # remove ".weight" from the keys __UpperCamelCase = ['''.weight''', '''.bias'''] __UpperCamelCase = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __UpperCamelCase = name.replace(snake_case , '''''' ) filtered_module_names.append(snake_case ) return filtered_module_names
328
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Node of a doubly linked list, holding one key/value pair."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes.

    Most-recently-used entries are kept next to the rear sentinel.
    """

    def __init__(self) -> None:
        # Sentinels carry no payload; real nodes live between them.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert ``node`` just before the rear sentinel (MRU position)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have a non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Detach ``node`` from the list and return it, or None if not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a dict plus a doubly linked list."""

    # Shared map used by the `decorator` classmethod: one cache per decorated function.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the cached value for ``key`` (bumping it to MRU), or None on a miss."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update ``key``; evict the LRU entry when over capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                # BUGFIX: original asserted on an undefined name `node`; the
                # evicted node is `first_node`, which is guaranteed to be in the list.
                assert self.list.remove(first_node) is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory memoizing a single-argument function in an LRUCache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
82
0
"""Tests for MinHash-based near-duplicate detection and dataset deduplication."""
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    """Build a tiny three-file dataset whose first two entries are near-duplicates.

    BUGFIX: the original body referenced undefined names (`lowercase_`,
    `dataset`) and was not callable by the tests below under its mangled name.
    """
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_files(self) -> None:
        # The two "a ..." files are similar above the 0.85 Jaccard threshold.
        dataset = get_dataset()
        duplicate_clusters = make_duplicate_clusters(dataset, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self) -> None:
        dataset = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(dataset)
        # One of the two near-duplicates is removed, leaving two rows.
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
362
"""Fetch the citation count shown by a Google Scholar lookup page."""
import requests

# BUGFIX: the package is `bs4` (BeautifulSoup 4), not `bsa`.
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the text of the third result-footer anchor (the "Cited by N" link).

    :param base_url: Scholar lookup endpoint.
    :param params: query parameters identifying the publication.
    :raises AttributeError: if the expected result divs are absent from the page.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    # `gs_ri` wraps one search result; `gs_fl` is its footer link row.
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    # BUGFIX: the main block called undefined names `get_citation` / `params`;
    # the definitions above now match these references.
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
40
0
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml


logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: run the wrapped callable eagerly, or as a
    (optionally XLA-compiled) ``tf.function``.

    BUGFIX: the original gave both parameters the same mangled name while the
    body read `do_eager_mode` / `use_xla`, and the class below referenced this
    function as `run_with_tf_optimizations`, which did not exist.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """Build a (batch_size, sequence_length) tensor of random token ids in [0, vocab_size)."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    # BUGFIX: `tf.intaa` is not a TensorFlow dtype; token ids are int32.
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    """Benchmark inference / training speed and memory of TF transformer models."""

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg forward-pass closure."""
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the LM-head model and return a zero-arg train-step closure."""
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
317
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, with 'same' padding for odd kernels."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    """Stem: a stride-2 conv embedding that also validates the channel count."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """1x1 strided conv + batchnorm used to project residual inputs when shapes differ."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-excite: channel-wise attention from globally pooled features."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    """RegNet X residual layer: 1x1 -> grouped 3x3 -> 1x1 convs with a residual shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet Y residual layer: an X layer with a squeeze-and-excite block."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A stage of `depth` layers; the first layer downsamples with the given stride."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config,
                in_channels,
                out_channels,
                stride=stride,
            ),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    """Stack of RegNet stages; optionally collects each stage's hidden state."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    """Weight-initialization and config plumbing shared by all RegNet heads."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetEncoder):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
317
1
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place by random transpositions and return it.

    Performs ``len(data)`` swaps of two uniformly chosen positions.
    NOTE(review): this is a transposition shuffle, not the unbiased classic
    Fisher-Yates walk; kept as-is to preserve the original algorithm.

    Args:
        data: The list to shuffle (mutated in place).

    Returns:
        The same list object, shuffled.
    """
    for _ in range(len(data)):
        # Pick two random positions and swap their contents.
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


# Backward-compatible alias for the previous (obfuscated) name.
_UpperCamelCase = fisher_yates_shuffle

if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
366
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Each list-valued argument holds one entry per stage of the three-stage
    architecture (convolutional patch embedding + transformer blocks).
    Extra keyword arguments are forwarded to :class:`PretrainedConfig`.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        # NOTE(review): list defaults are shared objects (upstream HF convention);
        # callers must not mutate these config lists in place.
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


# Backward-compatible aliases for the previous (obfuscated) module-level names.
__SCREAMING_SNAKE_CASE = CvtConfig
UpperCamelCase__ = CVT_PRETRAINED_CONFIG_ARCHIVE_MAP
87
0
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'

_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'

_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase__(datasets.Metric):
    """BLEU metric: thin wrapper around the tensorflow/nmt reference implementation."""

    def _info(self):
        # Declare the expected feature schema (tokenized predictions; list of
        # tokenized references per prediction) and the reference code/docs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Run `compute_bleu` and unpack its six-element score tuple into a dict."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
216
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the one-time tokenizer-model download so concurrent workers
    # sharing a filesystem do not race on it.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Return ``x`` re-split so that each sentence is on its own line.

    Used so sentence-level metrics (e.g. ROUGE-Lsum) see one sentence per line.
    """
    # re.sub returns a new string — the result must be kept (the original
    # discarded it, making the "<n>" removal a no-op).
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))


# Backward-compatible alias for the previous (obfuscated) name.
a_ = add_newline_to_end_of_each_sentence
284
0
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    """Extracts archives into the datasets cache dir, reusing previous extractions."""

    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives.
        # We extract in the cache dir, and get the extracted path name by hashing the original path.
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Re-extract when forced, or when the previous output is missing or an empty directory.
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract ``input_path`` if it is a recognized archive; otherwise return it unchanged."""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    """Interface implemented by every single-format extractor."""

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Base for extractors that detect their format from the file's leading magic bytes."""

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only archive members that are safe to extract.

        Blocks absolute/`..` paths and symlinks/hardlinks escaping the output
        directory (path-traversal mitigation).
        """

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1f\x8b"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2f\xfd"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5a\x68"]  # "BZh"

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4d\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    """Dispatches a path to the right extractor based on its magic number / format name."""

    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls) -> int:
        # Longest magic number across all magic-number-based extractors.
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions of the same archive.
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
371
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A directed edge with a 0/1 weight."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Directed graph over vertices ``0..size-1`` supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        # One adjacency bucket per vertex.
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the outgoing edges of ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge; weight must be 0 or 1 and the target in range."""
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 to the back.

        Raises:
            ValueError: if ``finish_vertex`` is unreachable from ``start_vertex``.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0  # the start is at distance 0 from itself

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if we already have an equal-or-better distance recorded.
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


# Backward-compatible alias for the previous (obfuscated) module-level name.
__lowercase = AdjacencyList

if __name__ == "__main__":
    import doctest

    doctest.testmod()
127
0
"""simple docstring""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def _lowerCamelCase( a , a , a ): # Initialise PyTorch model __a = AlbertConfig.from_json_file(a ) print(F"Building PyTorch model from configuration: {config}" ) __a = AlbertForPreTraining(a ) # Load weights from tf checkpoint load_tf_weights_in_albert(a , a , a ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--albert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained ALBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) SCREAMING_SNAKE_CASE__:Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
261
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : str = StableUnCLIPImgaImgPipeline _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : List[Any] = frozenset([] ) def a__ ( self ): __a = 32 __a = embedder_hidden_size # image encoding components __a = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __a = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __a = 
DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __a = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __a = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __a = AutoencoderKL() __a = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return components def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ): if str(lowerCamelCase ).startswith("mps" ): __a = torch.manual_seed(lowerCamelCase ) else: __a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __a = input_image * 0.5 + 0.5 __a = input_image.clamp(0 , 1 ) __a 
= input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def a__ ( self ): __a = "cpu" # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __a = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __a = sd_pipe(**lowerCamelCase ).images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def a__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( 
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() 
pipe.enable_sequential_cpu_offload() __a = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __a = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
261
1
"""Tests for the stochastic DPM-Solver SDE scheduler."""
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    # NOTE: the original (obfuscated) class inherited from itself, which raises
    # NameError at class-creation time; the intended base is SchedulerCommonTest.
    scheduler_classes = (DPMSolverSDEScheduler,)  # read by the common harness as self.scheduler_classes
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config dict, with overrides applied from kwargs."""
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        # Bug fix: the original built the dict under one name but updated and
        # returned an undefined `config` variable.
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001], [0.0_002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run a full denoising loop and pin per-device output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Reference values differ per backend because the SDE noise sampler is
        # device-dependent.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_821_044_921_875) < 1e-2
            assert abs(result_mean.item() - 0.2_178_705_964_565_277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_352_111_816_406) < 1e-2
            assert abs(result_mean.item() - 0.22_342_906_892_299_652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_383_422_851_562) < 1e-2
            assert abs(result_mean.item() - 0.211_619_570_851_326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same full loop, but with the scheduler configured for v-prediction."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_149_200_439_453) < 1e-2
            assert abs(result_mean.item() - 0.16_226_289_014_816_284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_663_360_595_703) < 1e-2
            assert abs(result_mean.item() - 0.16_688_326_001_167_297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8_487_548_828_125) < 1e-2
            assert abs(result_mean.item() - 0.1_560_530_662_536_621) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed directly on the target device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_957_397_460_938) < 1e-2
            assert abs(result_mean.item() - 0.21_805_934_607_982_635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_353_637_695_312) < 1e-2
            assert abs(result_mean.item() - 0.22_342_908_382_415_771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_383_422_851_562) < 1e-2
            assert abs(result_mean.item() - 0.211_619_570_851_326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on-device using the Karras sigma schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_974_135_742_188) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_653_564_453_125) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3_135_223_388_672) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
357
"""PyTest suite for the digital_image_processing package."""
import numpy as np
# Bug fix: the original imported from a nonexistent `cva` module and a
# nonexistent `numpy.uinta` name; the intended imports are cv2 and uint8.
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: a small color image and its grayscale version.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as pil_img:
        # Work around assertion for response
        assert str(cc.change_contrast(pil_img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
129
0
"""Tests for the Flax GPT-J models (cache usage, PT/Flax equivalence, generation)."""
import tempfile
import unittest

import numpy as np

import transformers
# Bug fix: the original imported the nonexistent name `GPTaTokenizer`.
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    """Builds small GPT-J configs/inputs and shared cache-consistency checks.

    NOTE: in the obfuscated original every constructor argument was assigned to
    a single throwaway local instead of instance attributes, and the class name
    referenced by the test case (`FlaxGPTJModelTester`) did not exist.
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        # GPT-J has no dedicated special tokens in this tiny config; reuse the
        # last vocab id for bos/eos/pad.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask) for a tiny GPT-J model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Verify cached autoregressive decoding matches a full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same cache check, but with an explicit (padded) attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    # NOTE: the obfuscated original used the undefined base `_a` twice; the
    # intended mixins are FlaxModelTesterMixin and FlaxGenerationTesterMixin.
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        """Left-padded batch generation against pinned reference continuations."""
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        """Load PT weights into Flax and compare outputs (also via save/reload)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        """Load Flax weights into PT and compare outputs (also via save/reload)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
28
"""F1 metric wrapper around scikit-learn for the `datasets` metric API."""
# Bug fix: the original imported the nonexistent `sklearn.metrics.fa_score`.
from sklearn.metrics import f1_score

import datasets


# Bug fix: all three doc strings were assigned to one throwaway name while the
# decorator and `_info` referenced _DESCRIPTION/_KWARGS_DESCRIPTION/_CITATION.
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the
        labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a
        multiclass average ignoring a majority negative class. Labels not present in the data will result in 0
        components in a macro average. For multilabel targets, labels are column indices. By default, all labels in
        `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`.
        Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for
        each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to
        `'binary'`.

        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes
          found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false
          positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance
          into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true
          instances for each label). This alters 'macro' to account for label imbalance. This option can result in an
          F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel
          classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`.
        Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    """F1 score metric (binary, multiclass and multilabel)."""

    def _info(self):
        # Bug fix: both methods were named `__A` in the obfuscated original, so
        # the second definition shadowed the first and the framework hooks
        # (`_info`/`_compute`) never existed.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
254
0
"""Configuration classes for the data2vec-text model."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    # NOTE: the obfuscated original defined two classes with the same name and
    # an undefined base `a_`, and assigned every constructor argument to one
    # throwaway local instead of `self.*` attributes.
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic axes of the ONNX export inputs, per task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
139
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

# Bug fix: the version constant was bound to a throwaway name instead of the
# conventional `__version__`.
__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Bug fix: the obfuscated original rebound one throwaway module-level name
# seven times; the intent is to re-export these symbols on the deprecated
# module objects for backward compatibility.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
139
1
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __A =pytest.mark.integration @require_faiss class _SCREAMING_SNAKE_CASE ( snake_case_ ): def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(lowercase ) for x in np.arange(30 ).tolist()]} ) return dset def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: import faiss lowerCamelCase_ = self._create_dummy_dataset() lowerCamelCase_ = dset.map( lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase ) lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def SCREAMING_SNAKE_CASE_( self ) -> Dict: import faiss lowerCamelCase_ = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: import faiss lowerCamelCase_ = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... 
but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: lowerCamelCase_ = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: from elasticsearch import Elasticsearch lowerCamelCase_ = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: lowerCamelCase_ = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} lowerCamelCase_ = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=lowercase ) lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class _SCREAMING_SNAKE_CASE ( snake_case_ ): def SCREAMING_SNAKE_CASE_( self ) -> Tuple: import faiss lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , 
dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query lowerCamelCase_ = np.zeros(5 , dtype=np.floataa ) lowerCamelCase_ = 1 lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase ) self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1] lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase ) self.assertRaises(lowercase , index.search_batch , queries[0] ) lowerCamelCase_ = [scores[0] for scores in total_scores] lowerCamelCase_ = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Any: import faiss lowerCamelCase_ = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) lowerCamelCase_ = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(lowercase ): lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: import faiss lowerCamelCase_ = faiss.IndexFlat(5 ) lowerCamelCase_ = FaissIndex(custom_index=lowercase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: import faiss lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. 
If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file: index.save(tmp_file.name ) lowerCamelCase_ = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) lowerCamelCase_ = np.zeros(5 , dtype=np.floataa ) lowerCamelCase_ = 1 lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase_ ( lowerCamelCase__ ): import faiss lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) lowerCamelCase_ = "index.faiss" lowerCamelCase_ = F'mock://{index_name}' index.save(lowerCamelCase__ , storage_options=mockfs.storage_options ) lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options ) lowerCamelCase_ = np.zeros(5 , dtype=np.floataa ) lowerCamelCase_ = 1 lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _SCREAMING_SNAKE_CASE ( snake_case_ ): def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: lowerCamelCase_ = Elasticsearch() lowerCamelCase_ = {"acknowledged": True} lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query lowerCamelCase_ = "foo" lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase ) self.assertEqual(scores[0] , 1 ) 
self.assertEqual(indices[0] , 0 ) # single query with timeout lowerCamelCase_ = "foo" lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries lowerCamelCase_ = ["foo", "bar", "foobar"] lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase ) lowerCamelCase_ = [scores[0] for scores in total_scores] lowerCamelCase_ = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([1, 1, 1] , lowercase ) # batched queries with timeout lowerCamelCase_ = ["foo", "bar", "foobar"] lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 ) lowerCamelCase_ = [scores[0] for scores in total_scores] lowerCamelCase_ = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([1, 1, 1] , lowercase )
19
def different_signs(numa: int, numb: int) -> bool:
    """Return True if ``numa`` and ``numb`` have opposite signs.

    Uses the XOR sign-bit trick: for two's-complement integers,
    ``numa ^ numb`` is negative exactly when the operands' sign bits
    differ.  Zero counts as non-negative.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(-4, -7)
    False
    """
    # The previous revision declared two parameters with the same name
    # (a SyntaxError) and referenced an undefined `numa` twice; this is
    # the evident intended implementation.
    return numa ^ numb < 0


# Backwards-compatible alias for the obfuscated name used previously.
__lowercase = different_signs


if __name__ == "__main__":
    import doctest

    doctest.testmod()
257
0
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> "qiskit.result.counts.Counts":
    """Build and simulate a Quantum Fourier Transform circuit.

    Applies Hadamard + controlled-phase rotations qubit by qubit, swaps
    the qubit order at the end (the QFT reverses bit order), measures
    all qubits, and runs 10 000 shots on the qasm simulator.

    Args:
        number_of_qubits: size of the register; must be an int in [1, 10].

    Returns:
        The measurement counts from the simulation.

    Raises:
        TypeError: if ``number_of_qubits`` is not an integer.
        ValueError: if it is <= 0, not exact, or > 10 (too big to simulate).
    """
    # The previous revision had the type check inverted
    # (`isinstance(x, x)` raised for *valid* input) and referenced the
    # undefined name `_A` throughout.
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the highest remaining qubit, then controlled
        # phase rotations from every lower qubit onto it.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # QFT emits the result in reversed bit order: swap to restore it.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


# Backwards-compatible alias for the obfuscated name used previously.
_SCREAMING_SNAKE_CASE = quantum_fourier_transform


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
353
from __future__ import annotations import time import numpy as np _UpperCAmelCase = [8, 5, 9, 7] _UpperCAmelCase = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _UpperCAmelCase = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case_ : def __init__( self : Union[str, Any] , _snake_case : list[int] , _snake_case : list[list[int]] , _snake_case : list[list[int]] , )->None: '''simple docstring''' __lowerCAmelCase : str = claim_vector __lowerCAmelCase : List[Any] = allocated_resources_table __lowerCAmelCase : str = maximum_claim_table def UpperCAmelCase__ ( self : Tuple )->list[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def UpperCAmelCase__ ( self : int )->list[int]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def UpperCAmelCase__ ( self : Optional[int] )->list[list[int]]: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(_snake_case ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def UpperCAmelCase__ ( self : Union[str, Any] )->dict[int, list[int]]: '''simple docstring''' return {self.__need().index(_snake_case ): i for i in self.__need()} def UpperCAmelCase__ ( self : Dict , **_snake_case : Optional[Any] )->None: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.__need() __lowerCAmelCase : Any = self.__allocated_resources_table __lowerCAmelCase : List[Any] = self.__available_resources() __lowerCAmelCase : Optional[Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 50 + """\n""" ) while need_list: __lowerCAmelCase : Optional[Any] = False for each_need in need_list: __lowerCAmelCase : Optional[int] = True for index, need in enumerate(_snake_case ): if need > 
available_resources[index]: __lowerCAmelCase : int = False break if execution: __lowerCAmelCase : int = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __lowerCAmelCase : Any = original_need_index print(F'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(_snake_case ) # update available/freed resources stack __lowerCAmelCase : int = np.array(_snake_case ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(_snake_case ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def UpperCAmelCase__ ( self : List[Any] )->int: '''simple docstring''' print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( F'''P{self.__allocated_resources_table.index(_snake_case ) + 1}''' + """ """.join(F'''{it:>8}''' for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( F'''P{self.__maximum_claim_table.index(_snake_case ) + 1}''' + """ """.join(F'''{it:>8}''' for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(_snake_case ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(_snake_case ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
232
0
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__ (_A ): """simple docstring""" def __UpperCAmelCase ( self : str ) -> Dict: a = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCamelCase , "embed_dim" ) ) self.parent.assertTrue(hasattr(__lowerCamelCase , "num_heads" ) ) class snake_case__ : """simple docstring""" def __init__( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Any=[16, 48, 96] , __lowerCamelCase : Any=[1, 3, 6] , __lowerCamelCase : str=[1, 2, 10] , __lowerCamelCase : List[str]=[7, 3, 3] , __lowerCamelCase : Dict=[4, 2, 2] , __lowerCamelCase : List[str]=[2, 1, 1] , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[Any]=[False, False, True] , __lowerCamelCase : str=[0.0, 0.0, 0.0] , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Union[str, Any]=1e-12 , __lowerCamelCase : int=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Any=2 , ) -> Union[str, Any]: a = parent a = batch_size a = image_size a = patch_sizes a = patch_stride a = patch_padding a = is_training a = use_labels a = num_labels a = num_channels a = embed_dim a = 
num_heads a = stride_kv a = depth a = cls_token a = attention_drop_rate a = initializer_range a = layer_norm_eps def __UpperCAmelCase ( self : str ) -> Any: a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: # create a random int32 tensor of given shape a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ) -> Any: a = TFCvtModel(config=__lowerCamelCase ) a = model(__lowerCamelCase , training=__lowerCamelCase ) a = (self.image_size, self.image_size) a = image_size[0], image_size[1] for i in range(len(self.depth ) ): a = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) a = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple ) -> Any: a = self.num_labels a = TFCvtForImageClassification(__lowerCamelCase ) a = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : Optional[Any] ) -> 
Any: a = self.prepare_config_and_inputs() a = config_and_inputs a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class snake_case__ (_A , _A , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[Any] = False SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : List[Any] = False SCREAMING_SNAKE_CASE_ : List[str] = False def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: a = TFCvtModelTester(self ) a = TFCvtConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def __UpperCAmelCase ( self : Tuple ) -> Dict: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions" ) def __UpperCAmelCase ( self : Optional[Any] ) -> str: pass @unittest.skip(reason="Cvt does not use inputs_embeds" ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Cvt does not support input and output embeddings" ) def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." 
, ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: a = tf.keras.mixed_precision.Policy("mixed_float16" ) tf.keras.mixed_precision.set_global_policy(__lowerCamelCase ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32" ) def __UpperCAmelCase ( self : Dict ) -> List[str]: a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCamelCase ) a = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ) -> str: def check_hidden_states_output(__lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ): a = model_class(__lowerCamelCase ) a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) a = outputs.hidden_states a = len(self.model_tester.depth ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : int ) -> List[str]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def __UpperCAmelCase ( self : Any ) -> List[Any]: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = TFCvtModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __magic_name__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case__ (unittest.TestCase ): """simple docstring""" @cached_property def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCAmelCase ( self : str ) -> List[Any]: a = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass a = model(**__lowerCamelCase ) # verify the logits a = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) a = tf.constant([0.9_285, 0.9_015, -0.3_150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCamelCase , atol=1e-4 ) )
107
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A : List[Any] = { '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = [ '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegaForCausalLM''', '''MegaForMaskedLM''', '''MegaForMultipleChoice''', '''MegaForQuestionAnswering''', '''MegaForSequenceClassification''', '''MegaForTokenClassification''', '''MegaModel''', '''MegaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
33
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __UpperCAmelCase ( __a : Dict=None ) -> str: """simple docstring""" if subparsers is not None: _a : Union[str, Any] = subparsers.add_parser('''test''' ) else: _a : List[str] = argparse.ArgumentParser('''Accelerate test command''' ) parser.add_argument( '''--config_file''' ,default=__a ,help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) ,) if subparsers is not None: parser.set_defaults(func=__a ) return parser def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]: """simple docstring""" _a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] ) if args.config_file is None: _a : List[Any] = script_name else: _a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}""" _a : str = ['''accelerate-launch'''] + test_args.split() _a : str = execute_subprocess_async(__a ,env=os.environ.copy() ) if result.returncode == 0: print('''Test is a success! 
You are ready for your distributed training!''' ) def __UpperCAmelCase ( ) -> List[Any]: """simple docstring""" _a : Optional[int] = test_command_parser() _a : List[Any] = parser.parse_args() test_command(__a ) if __name__ == "__main__": main()
15
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of the numbers in *nums*.

    Raises:
        ValueError: if the list is empty (the mean is undefined).

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    """
    # The previous revision tested the undefined name `nums` against a
    # parameter named `__a`, so every call raised NameError.
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


# Backwards-compatible alias for the obfuscated name used previously.
__UpperCAmelCase = mean


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
1
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def UpperCAmelCase ( UpperCAmelCase="ro" , UpperCAmelCase="en" , UpperCAmelCase="wmt16" , UpperCAmelCase=None ) -> None: try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('run pip install datasets' ) snake_case_ = f'{src_lang}-{tgt_lang}' print(f'Converting {dataset}-{pair}' ) snake_case_ = datasets.load_dataset(UpperCAmelCase , UpperCAmelCase ) if save_dir is None: snake_case_ = f'{dataset}-{pair}' snake_case_ = Path(UpperCAmelCase ) save_dir.mkdir(exist_ok=UpperCAmelCase ) for split in ds.keys(): print(f'Splitting {split} with {ds[split].num_rows} records' ) # to save to val.source, val.target like summary datasets snake_case_ = 'val' if split == 'validation' else split snake_case_ = save_dir.joinpath(f'{fn}.source' ) snake_case_ = save_dir.joinpath(f'{fn}.target' ) snake_case_ = src_path.open('w+' ) snake_case_ = tgt_path.open('w+' ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): snake_case_ = x['translation'] src_fp.write(ex[src_lang] + '\n' ) tgt_fp.write(ex[tgt_lang] + '\n' ) print(f'Saved {dataset} dataset to {save_dir}' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
69
class OverFlowError(Exception):
    """Raised when an enqueue would exceed a queue's fixed capacity."""


class UnderFlowError(Exception):
    """Raised when dequeuing from an empty queue."""


class FixedPriorityQueue:
    """Priority queue with three fixed priority levels; 0 is the highest.

    Within one level, items come out in FIFO order; each level holds at
    most 100 items.
    """

    def __init__(self) -> None:
        # One FIFO list per priority level (0, 1, 2).
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        """Add *data* at *priority* (0, 1 or 2).

        Raises:
            OverflowError: if the chosen level already holds 100 items.
            ValueError: if *priority* is not a valid level.
        """
        try:
            if len(self.queues[priority]) >= 100:
                # NOTE: this class raises the *builtin* OverflowError,
                # matching the original behaviour.
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Remove and return the oldest item of the highest non-empty level.

        Raises:
            UnderFlowError: if every level is empty.
        """
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Priority queue where the smallest element has the highest priority.

    Holds at most 100 items.
    """

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        """Add *data*; raises OverFlowError when the queue is full."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        """Remove and return the smallest element.

        Raises:
            UnderFlowError: if the queue is empty.
        """
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    """Demonstrate FixedPriorityQueue; the final dequeue underflows on purpose."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    """Demonstrate ElementPriorityQueue; the final dequeue underflows on purpose."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
307
0
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division with the 6k±1 trick)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k ± 1, so only test those candidates.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 1_0001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
329
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


# Optional backends: imported only when the corresponding runtime is installed.
if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


# NOTE(review): this module was mangled by an automated renamer — every function below
# is named `__snake_case` (each def shadows the previous one), some parameter lists
# repeat the same name (a SyntaxError as written), and locals were collapsed to `A_`,
# so names read later (`model`, `is_compiled`, `kwargs`, `obj`, `source`, `port`, ...)
# are undefined.  Code is kept byte-for-byte; comments describe the apparent intent —
# confirm against the upstream `accelerate.utils.other` module before restoring.


def __snake_case ( __UpperCamelCase : Union[str, Any] ):
    """Return True if the argument is a torch.compile OptimizedModule (torch >= 2.0)."""
    if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
        return False
    return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )


# NOTE(review): duplicate parameter name below — SyntaxError as written.
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
    """Apparently unwraps a model from DDP/DataParallel/DeepSpeed/torch.compile wrappers."""
    A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    A_ = is_compiled_module(__UpperCamelCase )
    if is_compiled:
        # Remember the compiled wrapper and continue on the original module underneath.
        A_ = model
        A_ = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    # Peel off nested distributed wrappers (`.module` attributes).
    while isinstance(__UpperCamelCase ,__UpperCamelCase ):
        A_ = model.module
    if not keep_fpaa_wrapper:
        # Restore the original (pre-mixed-precision) forward if one was stashed away.
        A_ = getattr(__UpperCamelCase ,"forward" )
        A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
        if original_forward is not None:
            # Walk the decorator chain down to the stashed original forward.
            while hasattr(__UpperCamelCase ,"__wrapped__" ):
                A_ = forward.__wrapped__
                if forward == original_forward:
                    break
            A_ = forward
        # Undo a TransformerEngine conversion when one was applied.
        if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
            convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
    if is_compiled:
        # Re-attach the unwrapped module to the compiled wrapper before returning it.
        A_ = model
        A_ = compiled_model
    return model


def __snake_case ( ):
    """Block until every distributed process reaches this point."""
    PartialState().wait_for_everyone()


def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
    """Save an object: via XLA on TPU, otherwise only from the main local process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(__UpperCamelCase ,__UpperCamelCase )
    elif PartialState().local_process_index == 0:
        torch.save(__UpperCamelCase ,__UpperCamelCase )


@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
    """Context manager that apparently sets env vars from kwargs and removes them after.

    NOTE(review): body reads `kwargs`, which is undefined given the mangled parameter
    name, and the setting side (`os.environ[...] = ...`) was lost to `A_ = str(...)`.
    """
    for key, value in kwargs.items():
        A_ = str(__UpperCamelCase )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def __snake_case ( __UpperCamelCase : Optional[Any] ):
    """Best-effort human-readable name for an object (qualname, name, or str())."""
    if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
        A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
    if hasattr(__UpperCamelCase ,"__qualname__" ):
        return obj.__qualname__
    if hasattr(__UpperCamelCase ,"__name__" ):
        return obj.__name__
    return str(__UpperCamelCase )


def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
    """Recursively merge `source` into `destination` (nested dicts merged in place)."""
    for key, value in source.items():
        if isinstance(__UpperCamelCase ,__UpperCamelCase ):
            A_ = destination.setdefault(__UpperCamelCase ,{} )
            merge_dicts(__UpperCamelCase ,__UpperCamelCase )
        else:
            A_ = value
    return destination


def __snake_case ( __UpperCamelCase : int = None ):
    """Return True when a TCP port on localhost is already in use (default 29500)."""
    if port is None:
        A_ = 2_9500
    with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
        return s.connect_ex(("localhost", port) ) == 0
329
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : int = { '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''', } class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : List[str] = "open-llama" def __init__( self : Dict , A : Optional[Any]=10_00_00 , A : Optional[int]=40_96 , A : Tuple=1_10_08 , A : List[Any]=32 , A : Optional[Any]=32 , A : List[str]="silu" , A : Optional[int]=20_48 , A : Dict=0.02 , A : Tuple=1e-6 , A : Optional[int]=True , A : Dict=0 , A : Any=1 , A : Optional[Any]=2 , A : Tuple=False , A : Dict=True , A : Dict=0.1 , A : int=0.1 , A : Optional[Any]=True , A : List[str]=True , A : int=None , **A : Optional[int] , ) -> int: lowercase_ : Tuple = vocab_size lowercase_ : Tuple = max_position_embeddings lowercase_ : int = hidden_size lowercase_ : Any = intermediate_size lowercase_ : Dict = num_hidden_layers lowercase_ : str = num_attention_heads lowercase_ : Optional[Any] = hidden_act lowercase_ : List[Any] = initializer_range lowercase_ : Optional[Any] = rms_norm_eps lowercase_ : List[str] = use_cache lowercase_ : Dict = kwargs.pop( '''use_memorry_efficient_attention''' , A ) lowercase_ : List[Any] = hidden_dropout_prob lowercase_ : Tuple = attention_dropout_prob lowercase_ : Tuple = use_stable_embedding lowercase_ : Optional[int] = shared_input_output_embedding lowercase_ : Optional[int] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , ) def A ( self : Optional[int] ) -> List[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F'''got {self.rope_scaling}''' ) lowercase_ : Any = self.rope_scaling.get('''type''' , A ) lowercase_ : int = 
self.rope_scaling.get('''factor''' , A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(A , A ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
33
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
# Pegasus tokenizer test-suite.
# NOTE(review): mangled by an automated renamer — both test classes are named
# `lowercase` (the second shadows the first), every method is named `a` (so within
# each class only the last def survives at runtime), and assignment targets were
# collapsed to `snake_case_`, leaving names read later (`tokenizer`, `vocab_keys`,
# `rust_tokenizer`, `py_tokenizer`, `batch`, `targets`, ...) undefined.  Code is
# kept byte-for-byte; restore from the upstream transformers tests before running.
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# SentencePiece fixture (no BOS token) used to build the small test tokenizers.
_UpperCAmelCase : str = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")


@require_sentencepiece
@require_tokenizers
class lowercase ( lowercase_ , unittest.TestCase ):  # NOTE(review): base `lowercase_` undefined — presumably TokenizerTesterMixin
    __SCREAMING_SNAKE_CASE : Any = PegasusTokenizer
    __SCREAMING_SNAKE_CASE : List[Any] = PegasusTokenizerFast
    __SCREAMING_SNAKE_CASE : Dict = True
    __SCREAMING_SNAKE_CASE : List[Any] = True

    def a ( self ):
        """setUp: build a slow tokenizer from the fixture and save it to tmpdirname."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        snake_case_ = PegasusTokenizer(snake_case )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def a ( self ):
        # Large reference tokenizer used by the integration-style tests below.
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )

    def a ( self , **snake_case ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case )

    def a ( self , snake_case ):
        return ("This is a test", "This is a test")

    def a ( self ):
        # '</s>' should round-trip through id 1.
        snake_case_ = '</s>'
        snake_case_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )

    def a ( self ):
        # First/last vocab entries and total vocab size of the fixture tokenizer.
        snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(snake_case ) , 1103 )

    def a ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )

    def a ( self ):
        # Slow and fast tokenizers must agree on special/unknown-token handling.
        snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
        snake_case_ = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=snake_case , add_special_tokens=snake_case ).input_ids[0]
        snake_case_ = py_tokenizer([raw_input_str] , return_tensors=snake_case , add_special_tokens=snake_case ).input_ids[0]
        self.assertListEqual(snake_case , snake_case )

    def a ( self ):
        snake_case_ = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        snake_case_ = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        snake_case_ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        snake_case_ = tokenizer([raw_input_str] , return_tensors=snake_case ).input_ids[0]
        self.assertListEqual(snake_case , snake_case )

    def a ( self ):
        snake_case_ = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        snake_case_ = 'To ensure a smooth flow of bank resolutions.'
        snake_case_ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        snake_case_ = tokenizer([raw_input_str] , return_tensors=snake_case ).input_ids[0]
        self.assertListEqual(snake_case , snake_case )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def a ( self ):
        # Batch encoding pads/truncates to the model max length (1024) and
        # targets to max_length=5.
        snake_case_ = ['This is going to be way too long.' * 150, 'short example']
        snake_case_ = ['not super long but more than 5 tokens', 'tiny']
        snake_case_ = self._large_tokenizer(snake_case , padding=snake_case , truncation=snake_case , return_tensors='pt' )
        snake_case_ = self._large_tokenizer(
            text_target=snake_case , max_length=5 , padding=snake_case , truncation=snake_case , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(snake_case ) == 2  # input_ids, attention_mask.

    @slow
    def a ( self ):
        # Golden encoding for a pinned revision of bigbird-pegasus-large-arxiv.
        # fmt: off
        snake_case_ = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )


@require_sentencepiece
@require_tokenizers
class lowercase ( lowercase_ , unittest.TestCase ):  # NOTE(review): shadows the class above; base `lowercase_` undefined
    __SCREAMING_SNAKE_CASE : List[Any] = PegasusTokenizer
    __SCREAMING_SNAKE_CASE : int = PegasusTokenizerFast
    __SCREAMING_SNAKE_CASE : Optional[Any] = True
    __SCREAMING_SNAKE_CASE : List[str] = True

    def a ( self ):
        """setUp: bigbird variant — offset 0, no mask_token_sent, '[MASK]' mask."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        snake_case_ = PegasusTokenizer(snake_case , offset=0 , mask_token_sent=snake_case , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def a ( self ):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )

    def a ( self , **snake_case ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case )

    def a ( self , snake_case ):
        return ("This is a test", "This is a test")

    def a ( self ):
        # Slow/fast equivalence with the bigbird-style special tokens.
        snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
        snake_case_ = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=snake_case , add_special_tokens=snake_case ).input_ids[0]
        snake_case_ = py_tokenizer([raw_input_str] , return_tensors=snake_case , add_special_tokens=snake_case ).input_ids[0]
        self.assertListEqual(snake_case , snake_case )

    @require_torch
    def a ( self ):
        # Batch encoding pads/truncates to this model's 4096 max length.
        snake_case_ = ['This is going to be way too long.' * 1000, 'short example']
        snake_case_ = ['not super long but more than 5 tokens', 'tiny']
        snake_case_ = self._large_tokenizer(snake_case , padding=snake_case , truncation=snake_case , return_tensors='pt' )
        snake_case_ = self._large_tokenizer(
            text_target=snake_case , max_length=5 , padding=snake_case , truncation=snake_case , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(snake_case ) == 2  # input_ids, attention_mask.

    def a ( self ):
        # Golden ids from the original TF implementation for one sentence.
        snake_case_ = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        snake_case_ = self._large_tokenizer(snake_case ).input_ids
        self.assertListEqual(
            snake_case , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
200
# XLM model test-suite.
# NOTE(review): mangled by an automated renamer — the tester class and both test
# classes are all named `lowercase`, every method is named `a`, all parameters are
# `snake_case` (duplicate parameter names are a SyntaxError as written), and
# assignment targets were collapsed to `snake_case_`, so many names read later
# (`parent`, `config`, `model`, `result`, `inputs_dict`, `XLMModelTester`, ...) are
# undefined.  Code kept byte-for-byte; restore from upstream transformers tests.
import unittest

from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


class lowercase :
    """Builds tiny XLM configs/inputs and runs shape checks for each head."""

    # NOTE(review): every parameter below was renamed to `snake_case` — SyntaxError as written.
    def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=2 , snake_case=99 , snake_case=0 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=2 , snake_case=4 , snake_case="last" , snake_case=True , snake_case=None , snake_case=0 , ):
        # Apparent parameter order (from the attribute names below): parent,
        # batch_size, seq_length, is_training, use_input_lengths,
        # use_token_type_ids, use_labels, gelu_activation, sinusoidal_embeddings,
        # causal, asm, n_langs, vocab_size, n_special, hidden_size,
        # num_hidden_layers, num_attention_heads, dropouts, max_position_embeddings,
        # type_sequence_label_size, initializer_range, num_labels, num_choices,
        # summary_type, use_proj, scope, bos_token_id.
        snake_case_ = parent
        snake_case_ = batch_size
        snake_case_ = seq_length
        snake_case_ = is_training
        snake_case_ = use_input_lengths
        snake_case_ = use_token_type_ids
        snake_case_ = use_labels
        snake_case_ = gelu_activation
        snake_case_ = sinusoidal_embeddings
        snake_case_ = causal
        snake_case_ = asm
        snake_case_ = n_langs
        snake_case_ = vocab_size
        snake_case_ = n_special
        snake_case_ = hidden_size
        snake_case_ = num_hidden_layers
        snake_case_ = num_attention_heads
        snake_case_ = hidden_dropout_prob
        snake_case_ = attention_probs_dropout_prob
        snake_case_ = max_position_embeddings
        snake_case_ = type_sequence_label_size
        snake_case_ = initializer_range
        snake_case_ = num_labels
        snake_case_ = num_choices
        snake_case_ = summary_type
        snake_case_ = use_proj
        snake_case_ = scope
        snake_case_ = bos_token_id

    def a ( self ):
        """Build random ids/masks/labels plus a config for the tiny model."""
        snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ = None
        if self.use_input_lengths:
            snake_case_ = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        snake_case_ = None
        if self.use_token_type_ids:
            snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        snake_case_ = None
        snake_case_ = None
        snake_case_ = None
        if self.use_labels:
            snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ = ids_tensor([self.batch_size] , 2 ).float()
            snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def a ( self ):
        """Return a tiny XLMConfig mirroring the tester's hyper-parameters."""
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )

    # Base model: check last_hidden_state shape.
    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        snake_case_ = XLMModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ = model(snake_case , lengths=snake_case , langs=snake_case )
        snake_case_ = model(snake_case , langs=snake_case )
        snake_case_ = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # LM head: check loss and logits shapes.
    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        snake_case_ = XLMWithLMHeadModel(snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ = model(snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Simple QA head: start/end logits shapes.
    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        snake_case_ = XLMForQuestionAnsweringSimple(snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ = model(snake_case )
        snake_case_ = model(snake_case , start_positions=snake_case , end_positions=snake_case )
        snake_case_ = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Beam-style QA head: top-k log-probs/index/cls shapes and loss.
    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        snake_case_ = XLMForQuestionAnswering(snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ = model(snake_case )
        snake_case_ = model(
            snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , p_mask=snake_case , )
        snake_case_ = model(
            snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , )
        ((snake_case_) , ) = result_with_labels.to_tuple()
        snake_case_ = model(snake_case , start_positions=snake_case , end_positions=snake_case )
        ((snake_case_) , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    # Sequence-classification head.
    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        snake_case_ = XLMForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ = model(snake_case )
        snake_case_ = model(snake_case , labels=snake_case )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # Token-classification head.
    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        snake_case_ = self.num_labels
        snake_case_ = XLMForTokenClassification(snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ = model(snake_case , attention_mask=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Multiple-choice head: inputs expanded across num_choices.
    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        snake_case_ = self.num_choices
        snake_case_ = XLMForMultipleChoice(config=snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ = model(
            snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def a ( self ):
        """Pack prepared inputs into the kwargs dict used by the common tests."""
        snake_case_ = self.prepare_config_and_inputs()
        (
            (snake_case_) ,
            (snake_case_) ,
            (snake_case_) ,
            (snake_case_) ,
            (snake_case_) ,
            (snake_case_) ,
            (snake_case_) ,
            (snake_case_) ,
            (snake_case_) ,
        ) = config_and_inputs
        snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict


@require_torch
class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):  # NOTE(review): bases `lowercase_` undefined — presumably ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
    __SCREAMING_SNAKE_CASE : List[Any] = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    __SCREAMING_SNAKE_CASE : Tuple = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    __SCREAMING_SNAKE_CASE : int = (
        {
            '''feature-extraction''': XLMModel,
            '''fill-mask''': XLMWithLMHeadModel,
            '''question-answering''': XLMForQuestionAnsweringSimple,
            '''text-classification''': XLMForSequenceClassification,
            '''text-generation''': XLMWithLMHeadModel,
            '''token-classification''': XLMForTokenClassification,
            '''zero-shot''': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case ):
        """Skip QA pipeline tests when only the slow tokenizer is available."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def a ( self , snake_case , snake_case , snake_case=False ):
        """Add dummy cls_index/is_impossible labels for XLMForQuestionAnswering."""
        snake_case_ = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                snake_case_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case )
                snake_case_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case )
        return inputs_dict

    def a ( self ):
        snake_case_ = XLMModelTester(self )
        snake_case_ = ConfigTester(self , config_class=snake_case , emb_dim=37 )

    def a ( self ):
        self.config_tester.run_common_tests()

    def a ( self ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*snake_case )

    def a ( self ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*snake_case )

    def a ( self ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*snake_case )

    def a ( self ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*snake_case )

    def a ( self ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*snake_case )

    def a ( self ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*snake_case )

    def a ( self ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case )

    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
        """Check per-step attention shapes produced during generation."""
        self.assertIsInstance(snake_case , snake_case )
        self.assertListEqual(
            [isinstance(snake_case , snake_case ) for iter_attentions in attentions] , [True] * len(snake_case ) )
        self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(snake_case ):
            # adds PAD dummy token
            snake_case_ = min_length + idx + 1
            snake_case_ = min_length + idx + 1
            snake_case_ = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case ) )

    def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
        """Check per-step hidden-state shapes produced during generation."""
        self.assertIsInstance(snake_case , snake_case )
        self.assertListEqual(
            [isinstance(snake_case , snake_case ) for iter_hidden_states in hidden_states] , [True] * len(snake_case ) , )
        self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(snake_case ):
            # adds PAD dummy token
            snake_case_ = min_length + idx + 1
            snake_case_ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case ) , )
        pass

    @slow
    def a ( self ):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ = XLMModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )


@require_torch
class lowercase ( unittest.TestCase ):  # NOTE(review): shadows the class above
    @slow
    def a ( self ):
        """Greedy generation from 'the president' should loop the bigram."""
        snake_case_ = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(snake_case )
        snake_case_ = torch.tensor([[14, 447]] , dtype=torch.long , device=snake_case )  # the president
        snake_case_ = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        snake_case_ = model.generate(snake_case , do_sample=snake_case )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case )
200
1
'''simple docstring''' from __future__ import annotations from decimal import Decimal from numpy import array def _UpperCamelCase ( __A ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_lowerCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix UpperCamelCase__ = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("This matrix has no inverse." ) # Creates a copy of the matrix with swapped positions of the elements UpperCamelCase__ = [[0.0, 0.0], [0.0, 0.0]] UpperCamelCase__ = matrix[1][1], matrix[0][0] UpperCamelCase__ = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_lowerCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_lowerCamelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule UpperCamelCase__ = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("This matrix has no inverse." 
) # Creating cofactor matrix UpperCamelCase__ = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] UpperCamelCase__ = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) UpperCamelCase__ = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) UpperCamelCase__ = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) UpperCamelCase__ = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) UpperCamelCase__ = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) UpperCamelCase__ = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) UpperCamelCase__ = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) UpperCamelCase__ = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) UpperCamelCase__ = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) UpperCamelCase__ = array(_lowerCamelCase ) for i in range(3 ): for j in range(3 ): UpperCamelCase__ = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix UpperCamelCase__ = array(_lowerCamelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_lowerCamelCase ) # Calculate the inverse of the matrix return [[float(d(_lowerCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
80
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """(HF key, original key) rename pairs for the patch embedding of stage *idx*."""
    embed = []
    for src, dst in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{src}",
                f"stage{idx}.patch_embed.{dst}",
            )
        )
    return embed


def attention(idx, cnt):
    """(HF key, original key) rename pairs for transformer block *cnt* of stage *idx*."""
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"
    # Depthwise conv + batch-norm projections for query / key / value.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for src, dst in (
            ("convolution.weight", "conv.weight"),
            ("normalization.weight", "bn.weight"),
            ("normalization.bias", "bn.bias"),
            ("normalization.running_mean", "bn.running_mean"),
            ("normalization.running_var", "bn.running_var"),
            ("normalization.num_batches_tracked", "bn.num_batches_tracked"),
        ):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{long_name}.convolution_projection.{src}",
                    f"{orig_prefix}.attn.conv_proj_{short_name}.{dst}",
                )
            )
    # Linear projections for query / key / value.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for wb in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{long_name}.{wb}",
                    f"{orig_prefix}.attn.proj_{short_name}.{wb}",
                )
            )
    # Attention output projection, the MLP, and the two layer norms.
    for src, dst in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for wb in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{src}.{wb}", f"{orig_prefix}.{dst}.{wb}"))
    return attention_weights


def cls_token(idx):
    """(HF key, original key) rename pair for the classification token of stage *idx*.

    NOTE(review): the source side is always "stage2.cls_token" regardless of *idx*
    (in CvT only the last stage carries a cls token) — preserved from the original.
    """
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """(HF key, original key) rename pairs for the final layer norm and classifier head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original Microsoft CvT checkpoint to the HF format.

    Loads the original state dict from *cvt_file_name*, renames every tensor key,
    and saves the resulting model plus an image processor to *pytorch_dump_folder_path*.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels, idalabel=idalabel, labelaid=labelaid)

    # Per-stage depth is encoded in the model name: cvt-13, cvt-21, cvt-w24.
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    new_state_dict = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        new_state_dict[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(new_state_dict)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
0
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


# Module logger (obfuscated binding name kept for backward compatibility).
UpperCamelCase = get_logger(__name__)


class __UpperCAmelCase:
    """Mock download manager that serves pre-packaged "dummy data" for dataset tests.

    Resolves a `dummy_data.zip` either from the local datasets scripts checkout or
    from the GitHub repository, and mimics the real download manager's
    `download` / `download_and_extract` / `iter_*` API on top of it.
    """

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    # presumably a streaming-mode flag mirrored from the real manager — TODO confirm
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded (lazily, see `dummy_file`)
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # download/resolve the dummy data exactly once, then cache the path
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch (or locate) the dummy zip and return the path to its extracted root."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map *data_url* (str, list/tuple, or dict of urls) to local dummy-data paths."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        # the custom download function is ignored; dummy data is pre-extracted
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # dummy data is already extracted
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        """Resolve a dict of urls (values may be a single url or a list of urls)."""
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        """Resolve a list/tuple of urls."""
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        """Resolve a single url."""
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative posix path, opened file object) pairs for archive members."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield file paths under *paths*, skipping hidden/dunder files and dirs."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
125
import copy
import re


class TrialShortNamer:
    """Derive short, reversible run names from hyper-parameter dicts.

    Each parameter name is abbreviated to a collision-free prefix; only values
    that differ from ``DEFAULTS`` appear in the name. ``parse_repr`` inverts
    ``shortname``.
    """

    PREFIX = "hp"
    DEFAULTS = {}
    # cache of the abbreviation tables; built once per class (see build_naming_info)
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and record) a unique short prefix for a single word."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Return a unique short name for a (possibly underscore-separated) parameter."""
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        # abbreviation tables are built once and cached on the class
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode *params* as '<PREFIX>[_<short><value>...]', skipping default values."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a name produced by ``shortname`` back into a full parameter dict."""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters


# Backward-compatible alias for the obfuscated class name.
__UpperCAmelCase = TrialShortNamer
125
1
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class a(PretrainedConfig):
    """Configuration for a Data2Vec audio model.

    Stores the architecture hyper-parameters (feature extractor conv stack,
    transformer encoder, SpecAugment masking, CTC loss, adapter and x-vector
    heads). Parameter names/defaults reconstructed from the assignment
    right-hand sides; the obfuscated original had unusable duplicate
    parameter names.
    """

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # the three conv-stack descriptions must have one entry per layer
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # total downsampling factor of the conv feature extractor
        return math.prod(self.conv_stride)
180
def UpperCAmelCase__ (UpperCamelCase_ = 4_00_00_00 ):
    """Project Euler problem 2: return the sum of the even-valued Fibonacci
    terms that do not exceed *UpperCamelCase_* (default: four million).
    """
    even_total = 0
    current, following = 0, 1
    # walk the Fibonacci sequence pairwise instead of materializing a list
    while current <= UpperCamelCase_:
        if current % 2 == 0:
            even_total += current
        current, following = following, current + following
    return even_total


if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module — preserved as-is.
    print(f'''{solution() = }''')
127
0
from ..utils import DummyObject, requires_backends class lowercase ( metaclass=UpperCamelCase__ ): __SCREAMING_SNAKE_CASE : int = ["""flax""", """transformers"""] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['flax', 'transformers'] ) @classmethod def a ( cls , *snake_case , **snake_case ): requires_backends(cls , ['flax', 'transformers'] ) @classmethod def a ( cls , *snake_case , **snake_case ): requires_backends(cls , ['flax', 'transformers'] ) class lowercase ( metaclass=UpperCamelCase__ ): __SCREAMING_SNAKE_CASE : Dict = ["""flax""", """transformers"""] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['flax', 'transformers'] ) @classmethod def a ( cls , *snake_case , **snake_case ): requires_backends(cls , ['flax', 'transformers'] ) @classmethod def a ( cls , *snake_case , **snake_case ): requires_backends(cls , ['flax', 'transformers'] ) class lowercase ( metaclass=UpperCamelCase__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ["""flax""", """transformers"""] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['flax', 'transformers'] ) @classmethod def a ( cls , *snake_case , **snake_case ): requires_backends(cls , ['flax', 'transformers'] ) @classmethod def a ( cls , *snake_case , **snake_case ): requires_backends(cls , ['flax', 'transformers'] ) class lowercase ( metaclass=UpperCamelCase__ ): __SCREAMING_SNAKE_CASE : Optional[int] = ["""flax""", """transformers"""] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['flax', 'transformers'] ) @classmethod def a ( cls , *snake_case , **snake_case ): requires_backends(cls , ['flax', 'transformers'] ) @classmethod def a ( cls , *snake_case , **snake_case ): requires_backends(cls , ['flax', 'transformers'] )
368
# Lint as: python3
"""Utilities for building dataset cache filenames from dataset/split names."""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case ``name`` to snake-case (e.g. ``SquadV1`` → ``squad_v1``)."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case ``name`` to camel-case (e.g. ``squad_v1`` → ``SquadV1``)."""
    name = _single_underscore_re.split(name)
    # Double underscores are kept: splitting on `(_{2,})` keeps the separator.
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    """Return the snake-case filename prefix for a dataset *name* (not a path)."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """Return ``<snake_name>-<split>`` after validating both components."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every shard file of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the list of shard filenames for a dataset split.

    With ``shard_lengths`` the names carry a ``-SSSSS-of-NNNNN`` shard suffix;
    otherwise a single filename is returned.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
200
0
"""simple docstring""" from __future__ import annotations def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] ): for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(lowerCamelCase_ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(lowerCamelCase_ ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
213
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case : Optional[Any] ={ 'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'], 'tokenization_cpmant': ['CpmAntTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] =[ 'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST', 'CpmAntForCausalLM', 'CpmAntModel', 'CpmAntPreTrainedModel', ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys __snake_case : int =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
129
0
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
353
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
# pattern name -> (regex locating the version line, replacement template with VERSION placeholder)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version declared in `fname` to `version`, using `REPLACE_PATTERNS[pattern]`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the `check_min_version` calls of every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the library version everywhere (init, setup, and examples unless this is a patch)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point the README model-list doc links at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read and parse the current version from the library `__init__.py`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Interactively pick the release version and stamp it across the repo."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Interactively pick the next dev version and stamp it across the repo."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
196
0
'''simple docstring''' def A_ ( snake_case ): SCREAMING_SNAKE_CASE:List[str] = int(snake_case ) if n_element < 1: SCREAMING_SNAKE_CASE:List[str] = ValueError("a should be a positive number" ) raise my_error SCREAMING_SNAKE_CASE:Any = [1] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = (0, 0, 0) SCREAMING_SNAKE_CASE:Union[str, Any] = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": A_ = input("Enter the last number (nth term) of the Hamming Number Series: ") print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") A_ = hamming(int(n)) print("-----------------------------------------------------") print(f'''The list with nth numbers is: {hamming_numbers}''') print("-----------------------------------------------------")
139
'''simple docstring''' from __future__ import annotations A_ = list[list[int]] # assigning initial values to the grid A_ = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution A_ = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def A_ ( snake_case , snake_case , snake_case , snake_case ): for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def A_ ( snake_case ): for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def A_ ( snake_case ): if location := find_empty_location(snake_case ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[int] = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(snake_case , snake_case , snake_case , snake_case ): SCREAMING_SNAKE_CASE:List[str] = digit if sudoku(snake_case ) is not None: return grid SCREAMING_SNAKE_CASE:List[Any] = 0 return None def A_ ( snake_case ): for row in grid: for cell in row: print(snake_case , end=" " ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("\nExample grid:\n" + "=" * 20) print_solution(example_grid) print("\nExample grid solution:") A_ = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("Cannot find a solution.")
139
1
from __future__ import annotations from decimal import Decimal from numpy import array def __UpperCamelCase ( lowercase__ : list[list[float]] ) -> list[list[float]]: '''simple docstring''' lowerCAmelCase_ : Any = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(lowercase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase_ : Any = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase_ : Tuple = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase_ : Dict = matrix[1][1], matrix[0][0] lowerCAmelCase_ : Optional[int] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(lowercase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(lowercase__ ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase_ : int = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creating cofactor matrix lowerCAmelCase_ : Union[str, Any] = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase_ : Any = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase_ : List[str] = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * 
d(matrix[2][0] )) ) lowerCAmelCase_ : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase_ : Tuple = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase_ : Dict = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase_ : Union[str, Any] = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase_ : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase_ : List[Any] = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase_ : List[Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase_ : List[Any] = array(lowercase__ ) for i in range(3 ): for j in range(3 ): lowerCAmelCase_ : Optional[int] = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase_ : int = array(lowercase__ ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(lowercase__ ) # Calculate the inverse of the matrix return [[float(d(lowercase__ ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
356
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build the (old_name, new_name) pairs mapping DINO checkpoint keys to HF ViT keys."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection of the checkpoint into separate q/k/v entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the classification head weights from the checkpoint (in place)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move the value stored under `old` to key `new`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak a DINO checkpoint's weights into our ViT structure and save it."""
    # define default ViT configuration
    config = ViTConfig()

    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
28
0