code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

# mT5 reuses the T5 docstrings, hence the T5 config name here.
_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift `input_ids` one token to the right for teacher forcing.

    The decoder start token is prepended, the last token is dropped, and any
    -100 sentinel (the ignored-label marker) is replaced with `pad_token_id`.

    Args:
        input_ids: batch of token ids, shape (batch, seq_len) — assumed 2-D (see [:, 1:] indexing).
        pad_token_id: id used to replace -100 sentinels.
        decoder_start_token_id: id placed at position 0 of every row.

    Returns:
        The shifted id matrix, same shape as `input_ids`.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # -100 marks positions ignored by the loss; they must become real pad tokens.
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    r"""mT5 model: architecturally identical to Flax T5, but built from an mT5 config."""

    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    r"""mT5 encoder-only model: T5 encoder with an mT5 config."""

    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    r"""mT5 model with a language-modeling head, for conditional generation."""

    model_type = "mt5"
    config_class = MT5Config
618
import random
import unittest

from torch.utils.data import BatchSampler, DataLoader, IterableDataset

from accelerate import Accelerator
from accelerate.data_loader import (
    BatchSamplerShard,
    DataLoaderDispatcher,
    DataLoaderShard,
    IterableDatasetShard,
    SkipBatchSampler,
    SkipDataLoader,
    skip_first_batches,
)


class RandomIterableDataset(IterableDataset):
    """Dummy iterable dataset of increasing ints with a random stopping point.

    After each yielded item it stops with probability `p_stop`; `max_length`
    bounds the worst case.
    """

    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        """Shard `batch_sampler` over 2 processes and compare with `expected`."""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # The advertised length of each shard must match what it actually yields.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        """Shard `dataset` over `num_processes` and check the shards jointly cover it."""
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size.
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            # With `drop_last=False` the shards may loop back to the start of the dataset to even out.
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
618
1
def factorial(digit: int) -> int:
    """Return digit! computed recursively (0! == 1! == 1)."""
    return 1 if digit in (0, 1) else digit * factorial(digit - 1)


def krishnamurthy(number: int) -> bool:
    """Return True if `number` equals the sum of the factorials of its digits.

    Examples of Krishnamurthy (a.k.a. Strong/Peterson) numbers: 1, 2, 145, 40585.
    Non-positive numbers are never Krishnamurthy numbers (original code wrongly
    accepted 0 because the digit loop never ran and 0 == 0).
    """
    if number <= 0:
        return False
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print('Program to check whether a number is a Krisnamurthy Number or not.')
    number = int(input('Enter number: ').strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
159
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """Image processor that rescales pixel values and pads images to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (delegates to the module-level `rescale`)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetrically pad the bottom/right edges so both dimensions become multiples of `size`.

        NOTE(review): when a dimension is already a multiple of `size`, this formula still
        adds a full extra `size` of padding — preserved as-is since callers may rely on it.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Rescale and pad a batch of images; per-call arguments override the instance defaults."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
159
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model / backbone.

    All arguments default to the focalnet-tiny architecture; `out_features` /
    `out_indices` select which stages a backbone exposes.
    """

    # `model_type` is the key PretrainedConfig uses for (de)serialization dispatch.
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=None,
        depths=None,
        focal_levels=None,
        focal_windows=None,
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        # `None` sentinels avoid mutable-default-argument sharing; the fallbacks
        # are the original literal defaults.
        self.hidden_sizes = hidden_sizes if hidden_sizes is not None else [192, 384, 768, 768]
        self.depths = depths if depths is not None else [2, 2, 6, 2]
        self.focal_levels = focal_levels if focal_levels is not None else [2, 2, 2, 2]
        self.focal_windows = focal_windows if focal_windows is not None else [3, 3, 3, 3]
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
61
import importlib
import os

import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry

from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem

from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    # While the mock fixture is active, both protocols are registered.
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    # Without the fixture, only the permanent "bz2" registration remains.
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    # A plain local path must come back unchanged.
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Fixture is None when the optional compression library is missing.
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
173
0
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=SCREAMING_SNAKE_CASE__ ) class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) _lowerCamelCase = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} ) _lowerCamelCase = Features( { '''answers''': Sequence( { '''text''': Value('''string''' ), '''answer_start''': Value('''int32''' ), } ) } ) _lowerCamelCase = "question" _lowerCamelCase = "context" _lowerCamelCase = "answers" @property def UpperCAmelCase__ ( self ) -> Dict[str, str]: '''simple docstring''' return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
21
"""simple docstring""" def __lowerCAmelCase ( __UpperCamelCase : str ): '''simple docstring''' return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
21
1
def is_unique(input_str: str) -> bool:
    """Return True if no character occurs more than once in `input_str`.

    Uses an arbitrary-precision int as a bitmap indexed by each character's
    Unicode code point, so it works for any characters, not just a-z.
    (Original obfuscated code computed ``pow(2, input_str)`` — a TypeError on
    any non-empty input — because all locals shared one name.)

    >>> is_unique("abc")
    True
    >>> is_unique("aba")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # Bit already set means this character was seen before.
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
678
0
"""simple docstring""" from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def lowercase ( ): """simple docstring""" A__ : List[Any] =ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=UpperCamelCase ) A__ : List[Any] =parser.add_subparsers(help="accelerate command helpers" ) # Register commands get_config_parser(subparsers=UpperCamelCase ) env_command_parser(subparsers=UpperCamelCase ) launch_command_parser(subparsers=UpperCamelCase ) tpu_command_parser(subparsers=UpperCamelCase ) test_command_parser(subparsers=UpperCamelCase ) # Let's go A__ : Optional[Any] =parser.parse_args() if not hasattr(UpperCamelCase , "func" ): parser.print_help() exit(1 ) # Run args.func(UpperCamelCase ) if __name__ == "__main__": main()
700
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : Dict = { "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = ["RemBertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = ["RemBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = [ "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RemBertForCausalLM", "RemBertForMaskedLM", "RemBertForMultipleChoice", "RemBertForQuestionAnswering", "RemBertForSequenceClassification", "RemBertForTokenClassification", "RemBertLayer", "RemBertModel", "RemBertPreTrainedModel", "load_tf_weights_in_rembert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = [ "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRemBertForCausalLM", "TFRemBertForMaskedLM", "TFRemBertForMultipleChoice", "TFRemBertForQuestionAnswering", "TFRemBertForSequenceClassification", "TFRemBertForTokenClassification", "TFRemBertLayer", "TFRemBertModel", "TFRemBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
595
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
25
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs _SCREAMING_SNAKE_CASE : List[Any] = imread(r'''digital_image_processing/image_data/lena_small.jpg''') _SCREAMING_SNAKE_CASE : Tuple = cvtColor(img, COLOR_BGR2GRAY) def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = cn.convert_to_negative(_A ) # assert negative_img array for at least one True assert negative_img.any() def UpperCAmelCase_ ( ): '''simple docstring''' with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(_A , 1_10 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() SCREAMING_SNAKE_CASE__ = canny.canny(_A ) # assert canny array for at least one True assert canny_array.any() def UpperCAmelCase_ ( ): '''simple docstring''' assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all() 
def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] ) SCREAMING_SNAKE_CASE__ = conv.img_convolve(_A , _A ).astype(_A ) assert res.any() def UpperCAmelCase_ ( ): '''simple docstring''' assert med.median_filter(_A , 3 ).any() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = sob.sobel_filter(_A ) assert grad.any() and theta.any() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = sp.make_sepia(_A , 20 ) assert sepia.all() def UpperCAmelCase_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = bs.Burkes(imread(_A , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def UpperCAmelCase_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = rs.NearestNeighbour(imread(_A , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. SCREAMING_SNAKE_CASE__ = imread(_A , 0 ) # Test for get_neighbors_pixel function() return not None SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = image[x_coordinate][y_coordinate] SCREAMING_SNAKE_CASE__ = lbp.get_neighbors_pixel( _A , _A , _A , _A ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image SCREAMING_SNAKE_CASE__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): SCREAMING_SNAKE_CASE__ = lbp.local_binary_value(_A , _A , _A ) assert lbp_image.any()
493
0
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _UpperCAmelCase : List[str] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : Optional[int]=18 , SCREAMING_SNAKE_CASE_ : List[str]=30 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=400 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=None , ): lowerCAmelCase__ = size if size is not None else {'''height''': 20, '''width''': 20} lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = image_size lowerCAmelCase__ = min_resolution lowerCAmelCase__ = max_resolution lowerCAmelCase__ = size lowerCAmelCase__ = do_normalize lowerCAmelCase__ = do_convert_rgb lowerCAmelCase__ = [512, 1_024, 2_048, 4_096] lowerCAmelCase__ = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def __snake_case ( self : str ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __snake_case ( self : int ): lowerCAmelCase__ = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' lowerCAmelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not 
is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ): UpperCamelCase_ :Tuple = PixaStructImageProcessor if is_vision_available() else None def __snake_case ( self : Tuple ): lowerCAmelCase__ = PixaStructImageProcessingTester(self ) @property def __snake_case ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self : Optional[Any] ): lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_convert_rgb''' ) ) def __snake_case ( self : List[str] ): lowerCAmelCase__ = self.image_processor_tester.prepare_dummy_image() lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) lowerCAmelCase__ = 2_048 lowerCAmelCase__ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1e-3 , rtol=1e-3 ) ) def __snake_case ( self : int ): # Initialize image_processor lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input lowerCAmelCase__ = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCAmelCase__ = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches 
self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCAmelCase__ = image_processor( SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __snake_case ( self : Optional[Any] ): # Initialize image_processor lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input lowerCAmelCase__ = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 lowerCAmelCase__ = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(SCREAMING_SNAKE_CASE_ ): lowerCAmelCase__ = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches lowerCAmelCase__ = '''Hello''' lowerCAmelCase__ = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ , header_text=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCAmelCase__ = image_processor( SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ , header_text=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __snake_case ( self : str ): # Initialize image_processor lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) lowerCAmelCase__ = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCAmelCase__ = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCAmelCase__ = image_processor( SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __snake_case ( self : Union[str, Any] ): # Initialize image_processor lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) # Test not batched input lowerCAmelCase__ = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCAmelCase__ = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCAmelCase__ = image_processor( SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , 
max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ): UpperCamelCase_ :Optional[int] = PixaStructImageProcessor if is_vision_available() else None def __snake_case ( self : Optional[Any] ): lowerCAmelCase__ = PixaStructImageProcessingTester(self , num_channels=4 ) lowerCAmelCase__ = 3 @property def __snake_case ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self : Optional[Any] ): lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_convert_rgb''' ) ) def __snake_case ( self : Tuple ): # Initialize image_processor lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input lowerCAmelCase__ = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCAmelCase__ = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowerCAmelCase__ = image_processor( SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , 
max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
717
from __future__ import annotations def lowerCAmelCase_ (lowercase__ : list[int] ) -> bool: '''simple docstring''' return len(set(lowercase__ ) ) == len(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
288
0
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" if "cls_token" in name: _SCREAMING_SNAKE_CASE = name.replace("""cls_token""" , """vit.embeddings.cls_token""" ) if "mask_token" in name: _SCREAMING_SNAKE_CASE = name.replace("""mask_token""" , """decoder.mask_token""" ) if "decoder_pos_embed" in name: _SCREAMING_SNAKE_CASE = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: _SCREAMING_SNAKE_CASE = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: _SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" ) if "decoder_blocks" in name: _SCREAMING_SNAKE_CASE = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: _SCREAMING_SNAKE_CASE = name.replace("""blocks""" , """vit.encoder.layer""" ) if "attn.proj" in name: _SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: _SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: _SCREAMING_SNAKE_CASE = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: _SCREAMING_SNAKE_CASE = 
name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: _SCREAMING_SNAKE_CASE = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name: _SCREAMING_SNAKE_CASE = name.replace("""norm.weight""" , """vit.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name: _SCREAMING_SNAKE_CASE = name.replace("""norm.bias""" , """vit.layernorm.bias""" ) return name def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): _SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: _SCREAMING_SNAKE_CASE = key.split(""".""" ) _SCREAMING_SNAKE_CASE = int(key_split[1] ) if "decoder_blocks" in key: _SCREAMING_SNAKE_CASE = config.decoder_hidden_size _SCREAMING_SNAKE_CASE = """decoder.decoder_layers.""" if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] _SCREAMING_SNAKE_CASE = val[-dim:, :] elif "bias" in key: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[dim : dim * 2] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = config.hidden_size _SCREAMING_SNAKE_CASE = """vit.encoder.layer.""" if "weight" in key: _SCREAMING_SNAKE_CASE = val[:dim, :] _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] _SCREAMING_SNAKE_CASE = val[-dim:, :] elif "bias" in key: _SCREAMING_SNAKE_CASE = val[:dim] _SCREAMING_SNAKE_CASE = val[dim : dim * 2] _SCREAMING_SNAKE_CASE = val[-dim:] else: _SCREAMING_SNAKE_CASE = val return orig_state_dict def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = ViTMAEConfig() if "large" in checkpoint_url: _SCREAMING_SNAKE_CASE = 10_24 _SCREAMING_SNAKE_CASE = 40_96 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 16 elif "huge" in checkpoint_url: _SCREAMING_SNAKE_CASE = 14 _SCREAMING_SNAKE_CASE = 
12_80 _SCREAMING_SNAKE_CASE = 51_20 _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""] _SCREAMING_SNAKE_CASE = ViTMAEImageProcessor(size=config.image_size ) _SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() _SCREAMING_SNAKE_CASE = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg""" _SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) _SCREAMING_SNAKE_CASE = ViTMAEImageProcessor(size=config.image_size ) _SCREAMING_SNAKE_CASE = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) _SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = outputs.logits if "large" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor( [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] ) elif "huge" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor( [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] ) else: _SCREAMING_SNAKE_CASE = torch.tensor( [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", 
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) UpperCamelCase__ : List[Any] = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
591
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase__ : Optional[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class _a : """simple docstring""" SCREAMING_SNAKE_CASE = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'The column name of the images in the files.'}) SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the training data.'}) SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the validation data.'}) SCREAMING_SNAKE_CASE = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' 
) } , ) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def UpperCamelCase ( self ) -> Any: _SCREAMING_SNAKE_CASE = {} if self.train_dir is not None: _SCREAMING_SNAKE_CASE = self.train_dir if self.validation_dir is not None: _SCREAMING_SNAKE_CASE = self.validation_dir _SCREAMING_SNAKE_CASE = data_files if data_files else None @dataclass class _a : """simple docstring""" SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}) SCREAMING_SNAKE_CASE = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' 
) } , ) SCREAMING_SNAKE_CASE = field( default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'}) SCREAMING_SNAKE_CASE = field( default=_lowerCamelCase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'}) @dataclass class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = field( default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'}) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def lowerCAmelCase_ ( ) -> List[str]: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_mae""" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. 
_SCREAMING_SNAKE_CASE = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _SCREAMING_SNAKE_CASE = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE_ ) and data_args.train_val_split > 0.0: _SCREAMING_SNAKE_CASE = ds["""train"""].train_test_split(data_args.train_val_split ) _SCREAMING_SNAKE_CASE = split["""train"""] _SCREAMING_SNAKE_CASE = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _SCREAMING_SNAKE_CASE = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE_ ) elif model_args.model_name_or_path: _SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ ) else: _SCREAMING_SNAKE_CASE = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(F"New config: {config}" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE_ ) elif model_args.model_name_or_path: 
_SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ ) else: _SCREAMING_SNAKE_CASE = ViTImageProcessor() # create model if model_args.model_name_or_path: _SCREAMING_SNAKE_CASE = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE_ ) if training_args.do_train: _SCREAMING_SNAKE_CASE = ds["""train"""].column_names else: _SCREAMING_SNAKE_CASE = ds["""validation"""].column_names if data_args.image_column_name is not None: _SCREAMING_SNAKE_CASE = data_args.image_column_name elif "image" in column_names: _SCREAMING_SNAKE_CASE = """image""" elif "img" in column_names: _SCREAMING_SNAKE_CASE = """img""" else: _SCREAMING_SNAKE_CASE = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _SCREAMING_SNAKE_CASE = image_processor.size["""shortest_edge"""] else: _SCREAMING_SNAKE_CASE = (image_processor.size["""height"""], image_processor.size["""width"""]) _SCREAMING_SNAKE_CASE = Compose( [ Lambda(lambda SCREAMING_SNAKE_CASE_ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(SCREAMING_SNAKE_CASE_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = [transforms(SCREAMING_SNAKE_CASE_ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise 
ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _SCREAMING_SNAKE_CASE = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(SCREAMING_SNAKE_CASE_ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _SCREAMING_SNAKE_CASE = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(SCREAMING_SNAKE_CASE_ ) # Compute absolute learning rate _SCREAMING_SNAKE_CASE = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _SCREAMING_SNAKE_CASE = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _SCREAMING_SNAKE_CASE = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: _SCREAMING_SNAKE_CASE = None if training_args.resume_from_checkpoint is not None: _SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint elif last_checkpoint is not None: _SCREAMING_SNAKE_CASE = last_checkpoint _SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _SCREAMING_SNAKE_CASE = trainer.evaluate() trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE_ ) # 
Write model card and (optionally) push to hub _SCREAMING_SNAKE_CASE = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
591
1
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return a frequency table of totals for ``dice_number`` fair dice.

    Each die has faces 1..``sides_number``.  Index ``t`` of the returned list
    holds the number of ordered rolls whose faces sum to ``t`` (indices below
    ``dice_number`` stay 0).
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    # Enumerate every ordered outcome: sides_number ** dice_number rolls.
    for roll in product(face_numbers, repeat=dice_number):
        totals_frequencies[sum(roll)] += 1

    return totals_frequencies


def solution() -> float:
    """Project Euler 205.

    Peter rolls nine 4-sided dice, Colin rolls six 6-sided dice; Peter wins
    when his total is strictly higher.  Return the probability that Peter
    wins, rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9  # nine dice showing all 1s
    max_peter_total = 4 * 9
    min_colin_total = 6  # six dice showing all 1s
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Colin loses with any total strictly below Peter's
        # (slice start min_colin_total skips the impossible totals 0..5).
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
717
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowerCAmelCase__ : List[str] = HfApi() lowerCAmelCase__ : str = {} # fmt: off lowerCAmelCase__ : int = torch.tensor([ -0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65 ]) lowerCAmelCase__ : Dict = torch.tensor([ -0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05 ]) lowerCAmelCase__ : Union[str, Any] = torch.tensor([ 0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86 ]) lowerCAmelCase__ : List[Any] = torch.tensor([ 0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, 
-0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31 ]) lowerCAmelCase__ : Optional[Any] = torch.tensor([ 0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51]) lowerCAmelCase__ : List[Any] = torch.tensor([ -1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66 ]) lowerCAmelCase__ : Tuple = torch.tensor([ -1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 
2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43 ]) lowerCAmelCase__ : Any = torch.tensor([ -1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19 ]) # fmt: on lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith('''CompVis'''): lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowerCAmelCase__ : int = model(noise, time_step).sample assert torch.allclose( 
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
699
0
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class lowercase : '''simple docstring''' def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=4 , __a="gelu" , __a=0.0 , __a=0.1 , __a=True , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = seq_length UpperCAmelCase__ = is_training UpperCAmelCase__ = use_input_mask UpperCAmelCase__ = use_token_type_ids UpperCAmelCase__ = use_labels UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_multiple_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = hidden_dropout UpperCAmelCase__ = attention_dropout UpperCAmelCase__ = weight_tying UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = type_vocab_size UpperCAmelCase__ = type_sequence_label_size UpperCAmelCase__ = initializer_range UpperCAmelCase__ = num_labels UpperCAmelCase__ = num_choices UpperCAmelCase__ = scope def UpperCamelCase__ (self ) -> Tuple: """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = None if self.use_input_mask: UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ 
= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ = self.get_config() return config, input_ids, input_mask, token_labels def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) def UpperCamelCase__ (self ) -> Dict: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.prepare_config_and_inputs() UpperCAmelCase__ = True return config, input_ids, input_mask, token_labels def UpperCamelCase__ (self , __a , __a , __a ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = GPTNeoXJapaneseModel(config=__a ) model.to(__a ) model.eval() UpperCAmelCase__ = model(__a , attention_mask=__a ) UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ (self , __a , __a , __a ) -> int: """simple docstring""" UpperCAmelCase__ = True UpperCAmelCase__ = GPTNeoXJapaneseModel(__a ) model.to(__a ) model.eval() UpperCAmelCase__ = model(__a , attention_mask=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ (self , __a , __a , __a , __a ) -> List[str]: """simple docstring""" UpperCAmelCase__ = GPTNeoXJapaneseForCausalLM(config=__a ) model.to(__a ) model.eval() UpperCAmelCase__ = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ (self , __a , __a , __a ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = True UpperCAmelCase__ = GPTNeoXJapaneseForCausalLM(config=__a ) model.to(__a ) model.eval() # first forward pass UpperCAmelCase__ = model(__a , attention_mask=__a , use_cache=__a ) UpperCAmelCase__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase__ = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase__ = model(__a , attention_mask=__a , output_hidden_states=__a ) UpperCAmelCase__ = output_from_no_past['hidden_states'][0] UpperCAmelCase__ = model( __a , attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )['hidden_states'][0] # select random slice UpperCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () 
__SCREAMING_SNAKE_CASE = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def UpperCamelCase__ (self ) -> Tuple: """simple docstring""" UpperCAmelCase__ = GPTNeoXJapaneseModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 ) def UpperCamelCase__ (self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__a , __a , __a ) def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__a , __a , __a ) def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase__ = None self.model_tester.create_and_check_model_as_decoder(__a , __a , __a ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__a , __a , __a ) def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__a ) @slow def UpperCamelCase__ (self ) -> int: """simple docstring""" 
UpperCAmelCase__ = 'abeja/gpt-neox-japanese-2.7b' UpperCAmelCase__ = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、'] UpperCAmelCase__ = [ 'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。', '100年後に必要とされる会社は、「人」が中心の会社です。', 'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。', '国境の長いトンネルを抜けると、そこは雪国だった。', '美味しい日本食といえば、やっぱりお寿司ですよね。', ] UpperCAmelCase__ = GPTNeoXJapaneseTokenizer.from_pretrained(__a ) UpperCAmelCase__ = GPTNeoXJapaneseForCausalLM.from_pretrained(__a ) UpperCAmelCase__ = [] for prompt in prompts: UpperCAmelCase__ = tokenizer(__a , return_tensors='pt' ).input_ids UpperCAmelCase__ = model.generate(__a , max_length=50 ) UpperCAmelCase__ = tokenizer.batch_decode(__a , skip_special_tokens=__a ) predicted_outputs += generated_string self.assertListEqual(__a , __a )
146
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowercase : '''simple docstring''' __SCREAMING_SNAKE_CASE = BlenderbotSmallConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = """gelu""" def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = seq_length UpperCAmelCase__ = is_training UpperCAmelCase__ = use_labels UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = eos_token_id UpperCAmelCase__ = pad_token_id UpperCAmelCase__ = bos_token_id def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase__ = prepare_blenderbot_small_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ (self , __a , __a ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFBlenderbotSmallModel(config=__a ).get_decoder() UpperCAmelCase__ = inputs_dict['input_ids'] UpperCAmelCase__ = input_ids[:1, :] UpperCAmelCase__ = inputs_dict['attention_mask'][:1, :] UpperCAmelCase__ = inputs_dict['head_mask'] UpperCAmelCase__ = 1 # first forward pass UpperCAmelCase__ = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) UpperCAmelCase__ , UpperCAmelCase__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase__ = model(__a , attention_mask=__a )[0] UpperCAmelCase__ = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase__ = output_from_past[:, :, random_slice_idx] # test that 
outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1E-3 ) def UpperCamelCase_( snake_case__: Any , snake_case__: List[str] , snake_case__: Dict , snake_case__: Any=None , snake_case__: int=None , snake_case__: int=None , snake_case__: int=None , snake_case__: Optional[int]=None , ) -> int: if attention_mask is None: UpperCAmelCase__ = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __SCREAMING_SNAKE_CASE = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = ( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) 
__SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = TFBlenderbotSmallModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__a ) def UpperCamelCase__ (self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) @require_tokenizers @require_tf class lowercase ( unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] __SCREAMING_SNAKE_CASE = """facebook/blenderbot_small-90M""" @cached_property def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) @cached_property def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ = self.tokenizer(self.src_text , return_tensors='tf' ) UpperCAmelCase__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , ) UpperCAmelCase__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
146
1
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a translation dataset and dump it as line-aligned text files.

    For every split of ``dataset`` (e.g. ``wmt16``) and language pair
    ``src_lang-tgt_lang``, writes one ``<split>.source`` and one
    ``<split>.target`` file (the ``validation`` split is renamed ``val``),
    one example per line, into ``save_dir`` (default: ``<dataset>-<pair>``).

    Raises:
        ImportError: if the optional ``datasets`` package is not installed.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError) as err:
        raise ImportError("run pip install datasets") from err
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    # parents=True so a nested target directory does not fail with FileNotFoundError.
    save_dir.mkdir(parents=True, exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        # Context managers ensure the handles are flushed and closed even on error
        # (the original leaked both file objects).
        with src_path.open("w+") as src_fp, tgt_path.open("w+") as tgt_fp:
            # reader is the bottleneck so writing one record at a time doesn't slow things down
            for x in tqdm(ds[split]):
                ex = x["translation"]
                src_fp.write(ex[src_lang] + "\n")
                tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


# Backward-compatible alias for the previous (mangled) function name.
__lowerCamelCase = download_wmt_dataset


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
712
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } lowerCAmelCase__ = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } lowerCAmelCase__ = {"facebook/blenderbot-3B": 1_2_8} class _a ( lowerCamelCase_ ): """simple docstring""" __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] __SCREAMING_SNAKE_CASE = BlenderbotTokenizer def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , lowerCAmelCase_=True , **lowerCAmelCase_ , ): super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , 
mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) _lowercase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCAmelCase_ ) != add_prefix_space: _lowercase =getattr(lowerCAmelCase_ , pre_tok_state.pop("type" ) ) _lowercase =add_prefix_space _lowercase =pre_tok_class(**lowerCAmelCase_ ) _lowercase =add_prefix_space _lowercase ="post_processor" _lowercase =getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ ) if tokenizer_component_instance: _lowercase =json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowercase =tuple(state["sep"] ) if "cls" in state: _lowercase =tuple(state["cls"] ) _lowercase =False if state.get("add_prefix_space" , lowerCAmelCase_ ) != add_prefix_space: _lowercase =add_prefix_space _lowercase =True if state.get("trim_offsets" , lowerCAmelCase_ ) != trim_offsets: _lowercase =trim_offsets _lowercase =True if changes_to_apply: _lowercase =getattr(lowerCAmelCase_ , state.pop("type" ) ) _lowercase =component_class(**lowerCAmelCase_ ) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __lowerCAmelCase ( self ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self , lowerCAmelCase_ ): _lowercase =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else value _lowercase =value def __lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): _lowercase =kwargs.get("is_split_into_words" , lowerCAmelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): _lowercase =kwargs.get("is_split_into_words" , lowerCAmelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ): _lowercase =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ ) def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ): _lowercase =[self.sep_token_id] _lowercase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ): return token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , lowerCAmelCase_ ): _lowercase =[] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. 
inputs.append(lowerCAmelCase_ ) _lowercase =" ".join(lowerCAmelCase_ ) _lowercase =self.encode(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > self.model_max_length: _lowercase =input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
594
0
'''simple docstring''' import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCAmelCase : def __init__( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : int=1_3 , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=9_9 , __lowerCamelCase : Dict=6_4 , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : Optional[Any]=5 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Any=3_7 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : str=5_1_2 , __lowerCamelCase : int=1_6 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : int=0.02 , __lowerCamelCase : str=3 , __lowerCamelCase : Any=4 , __lowerCamelCase : Union[str, Any]=None , ): UpperCAmelCase__ :Dict = parent UpperCAmelCase__ :Optional[Any] = batch_size UpperCAmelCase__ :Union[str, Any] = seq_length UpperCAmelCase__ :Optional[Any] = is_training UpperCAmelCase__ :str = use_input_mask UpperCAmelCase__ :Optional[Any] = use_token_type_ids UpperCAmelCase__ :Optional[Any] = use_labels UpperCAmelCase__ :List[str] = vocab_size UpperCAmelCase__ 
:Optional[int] = hidden_size UpperCAmelCase__ :Optional[Any] = embedding_size UpperCAmelCase__ :Dict = num_hidden_layers UpperCAmelCase__ :str = num_attention_heads UpperCAmelCase__ :Optional[int] = intermediate_size UpperCAmelCase__ :Tuple = hidden_act UpperCAmelCase__ :Optional[Any] = hidden_dropout_prob UpperCAmelCase__ :Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ :Optional[int] = max_position_embeddings UpperCAmelCase__ :Tuple = type_vocab_size UpperCAmelCase__ :Any = type_sequence_label_size UpperCAmelCase__ :Tuple = initializer_range UpperCAmelCase__ :Tuple = num_labels UpperCAmelCase__ :int = num_choices UpperCAmelCase__ :List[str] = scope def __SCREAMING_SNAKE_CASE ( self : Tuple ): UpperCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ :List[str] = None if self.use_input_mask: UpperCAmelCase__ :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ :Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ :int = None UpperCAmelCase__ :Dict = None UpperCAmelCase__ :Tuple = None if self.use_labels: UpperCAmelCase__ :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ :str = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ :Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __SCREAMING_SNAKE_CASE ( self : Tuple ): return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str ): UpperCAmelCase__ :Optional[int] = MobileBertModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase__ :Optional[int] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ ) UpperCAmelCase__ :Tuple = model(lowercase_ , token_type_ids=lowercase_ ) UpperCAmelCase__ :Any = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : str ): UpperCAmelCase__ :Dict = MobileBertForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase__ :List[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): UpperCAmelCase__ :List[Any] = MobileBertForNextSentencePrediction(config=lowercase_ ) 
model.to(lowercase_ ) model.eval() UpperCAmelCase__ :str = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __SCREAMING_SNAKE_CASE ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : int ): UpperCAmelCase__ :Dict = MobileBertForPreTraining(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase__ :Optional[Any] = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , next_sentence_label=lowercase_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str ): UpperCAmelCase__ :Any = MobileBertForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase__ :List[str] = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : int ): UpperCAmelCase__ :List[Any] = self.num_labels UpperCAmelCase__ :Optional[Any] = 
MobileBertForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase__ :List[str] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ): UpperCAmelCase__ :Dict = self.num_labels UpperCAmelCase__ :Optional[int] = MobileBertForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase__ :Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ): UpperCAmelCase__ :List[Any] = self.num_choices UpperCAmelCase__ :List[str] = MobileBertForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase__ :Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ :int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ :List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ :Dict = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): UpperCAmelCase__ :str = self.prepare_config_and_inputs() ( ( 
UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) :Any = config_and_inputs UpperCAmelCase__ :List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( __a , __a , unittest.TestCase ): UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) UpperCAmelCase = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase = True def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Any=False ): UpperCAmelCase__ :str = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): UpperCAmelCase__ :Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ ) UpperCAmelCase__ :Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def __SCREAMING_SNAKE_CASE ( self : List[str] ): UpperCAmelCase__ :str = MobileBertModelTester(self ) UpperCAmelCase__ :List[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7 ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : 
List[Any] ): UpperCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*lowercase_ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): UpperCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase_ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): UpperCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase_ ) def __SCREAMING_SNAKE_CASE ( self : str ): UpperCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase_ ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ): UpperCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase_ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): UpperCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase_ ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ): UpperCAmelCase__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase_ ) def __SCREAMING_SNAKE_CASE ( self : Any ): UpperCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase_ ) def a__ ( UpperCamelCase_ : int ): return torch.tensor( SCREAMING_SNAKE_CASE_, dtype=torch.long, device=SCREAMING_SNAKE_CASE_, ) __lowerCamelCase = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): @slow def __SCREAMING_SNAKE_CASE ( self : str ): UpperCAmelCase__ :List[str] = 
MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(lowercase_ ) UpperCAmelCase__ :List[str] = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] ) with torch.no_grad(): UpperCAmelCase__ :Tuple = model(lowercase_ )[0] UpperCAmelCase__ :Optional[Any] = torch.Size((1, 9, 5_1_2) ) self.assertEqual(output.shape , lowercase_ ) UpperCAmelCase__ :Union[str, Any] = torch.tensor( [ [ [-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05], [-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00], [2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01], ] ] , device=lowercase_ , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE UpperCAmelCase__ :int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) UpperCAmelCase__ :Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
467
"""Bit-manipulation check for whether an integer is a power of two."""


def A_(number: int) -> bool:
    """Return True if ``number`` is a positive power of two.

    A positive power of two has exactly one bit set, so clearing its lowest
    set bit via ``number & (number - 1)`` yields zero. Zero is rejected
    explicitly: it also satisfies the bit trick but is not a power of two.

    >>> A_(1)
    True
    >>> A_(8)
    True
    >>> A_(6)
    False
    >>> A_(0)
    False

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError("""number must not be negative""")
    # `&` binds tighter than `==` in Python, so no extra parentheses are needed.
    return number != 0 and number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
451
0
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class A_ ( __lowerCamelCase ): lowerCAmelCase__ = ['image_processor', 'tokenizer'] lowerCAmelCase__ = 'BlipImageProcessor' lowerCAmelCase__ = 'AutoTokenizer' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[str] ): '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) # add QFormer tokenizer _lowerCamelCase : Optional[int] = qformer_tokenizer def __call__( self: List[str] ,__lowerCAmelCase: str = None ,__lowerCAmelCase: Optional[Any] = None ,__lowerCAmelCase: Dict = True ,__lowerCAmelCase: Dict = False ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: Optional[Any] = 0 ,__lowerCAmelCase: List[Any] = None ,__lowerCAmelCase: Union[str, Any] = None ,__lowerCAmelCase: Union[str, Any] = False ,__lowerCAmelCase: Any = False ,__lowerCAmelCase: Any = False ,__lowerCAmelCase: Union[str, Any] = False ,__lowerCAmelCase: Tuple = False ,__lowerCAmelCase: Union[str, Any] = True ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[str] ,): '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify at least images or text." 
) _lowerCamelCase : Dict = BatchFeature() if text is not None: _lowerCamelCase : List[Any] = self.tokenizer( text=SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,padding=SCREAMING_SNAKE_CASE_ ,truncation=SCREAMING_SNAKE_CASE_ ,max_length=SCREAMING_SNAKE_CASE_ ,stride=SCREAMING_SNAKE_CASE_ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE_ ,return_attention_mask=SCREAMING_SNAKE_CASE_ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE_ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE_ ,return_offsets_mapping=SCREAMING_SNAKE_CASE_ ,return_token_type_ids=SCREAMING_SNAKE_CASE_ ,return_length=SCREAMING_SNAKE_CASE_ ,verbose=SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,) encoding.update(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : List[Any] = self.qformer_tokenizer( text=SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,padding=SCREAMING_SNAKE_CASE_ ,truncation=SCREAMING_SNAKE_CASE_ ,max_length=SCREAMING_SNAKE_CASE_ ,stride=SCREAMING_SNAKE_CASE_ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE_ ,return_attention_mask=SCREAMING_SNAKE_CASE_ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE_ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE_ ,return_offsets_mapping=SCREAMING_SNAKE_CASE_ ,return_token_type_ids=SCREAMING_SNAKE_CASE_ ,return_length=SCREAMING_SNAKE_CASE_ ,verbose=SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,) _lowerCamelCase : str = qformer_text_encoding.pop("input_ids" ) _lowerCamelCase : List[str] = qformer_text_encoding.pop("attention_mask" ) if images is not None: _lowerCamelCase : Any = self.image_processor(SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ) encoding.update(SCREAMING_SNAKE_CASE_ ) return encoding def _lowercase ( self: str ,*__lowerCAmelCase: Any ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) def _lowercase ( self: int ,*__lowerCAmelCase: 
Union[str, Any] ,**__lowerCAmelCase: int ): '''simple docstring''' return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = self.tokenizer.model_input_names _lowerCamelCase : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' if os.path.isfile(SCREAMING_SNAKE_CASE_ ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ ,"qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) return super().save_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) @classmethod def _lowercase ( cls: Union[str, Any] ,__lowerCAmelCase: int ,**__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ,subfolder="qformer_tokenizer" ) _lowerCamelCase : Optional[Any] = cls._get_arguments_from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) args.append(SCREAMING_SNAKE_CASE_ ) return cls(*SCREAMING_SNAKE_CASE_ )
720
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' if days_between_payments <= 0: raise ValueError("days_between_payments must be > 0" ) if daily_interest_rate < 0: raise ValueError("daily_interest_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * daily_interest_rate * days_between_payments def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> float: '''simple docstring''' if number_of_compounding_periods <= 0: raise ValueError("number_of_compounding_periods must be > 0" ) if nominal_annual_interest_rate_percentage < 0: raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> float: '''simple docstring''' if number_of_years <= 0: raise ValueError("number_of_years must be > 0" ) if nominal_annual_percentage_rate < 0: raise ValueError("nominal_annual_percentage_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return compound_interest( _lowerCamelCase , nominal_annual_percentage_rate / 365 , number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
386
0
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging A_ = logging.get_logger(__name__) class __lowerCamelCase ( lowerCAmelCase ): def __init__( self , UpperCAmelCase=None , **UpperCAmelCase ): warnings.warn( '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` ''' '''instead.''' , UpperCAmelCase , ) super().__init__(args=UpperCAmelCase , **UpperCAmelCase )
29
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __SCREAMING_SNAKE_CASE : @property def __lowerCamelCase ( self ): return self.get_dummy_input() @property def __lowerCamelCase ( self ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , ): lowercase : Optional[int] = 4 lowercase : Dict = 32 lowercase : List[str] = (32, 32) lowercase : Optional[int] = torch.manual_seed(0 ) lowercase : Optional[int] = torch.device(SCREAMING_SNAKE_CASE__ ) lowercase : int = (batch_size, num_channels) + sizes lowercase : str = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = {'''hidden_states''': hidden_states} if include_temb: lowercase : List[Any] = 128 lowercase : List[Any] = randn_tensor((batch_size, temb_channels) , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ) if include_res_hidden_states_tuple: lowercase : List[Any] = torch.manual_seed(1 ) lowercase : Optional[Any] = (randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ),) if include_encoder_hidden_states: lowercase : Optional[Any] = floats_tensor((batch_size, 32, 32) ).to(SCREAMING_SNAKE_CASE__ ) if include_skip_sample: lowercase : Dict = randn_tensor(((batch_size, 3) + sizes) , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ) return dummy_input def __lowerCamelCase ( self ): lowercase : Optional[int] = { '''in_channels''': 
32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": lowercase : Optional[int] = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) lowercase : Union[str, Any] = self.dummy_input return init_dict, inputs_dict def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : str = self.prepare_init_args_and_inputs_for_common() lowercase : List[str] = self.block_class(**SCREAMING_SNAKE_CASE__ ) unet_block.to(SCREAMING_SNAKE_CASE__ ) unet_block.eval() with torch.no_grad(): lowercase : Tuple = unet_block(**SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = output[0] self.assertEqual(output.shape , self.output_shape ) lowercase : Optional[Any] = output[0, -1, -3:, -3:] lowercase : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) assert torch_all_close(output_slice.flatten() , SCREAMING_SNAKE_CASE__ , atol=5E-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def __lowerCamelCase ( self ): lowercase , lowercase : Dict = self.prepare_init_args_and_inputs_for_common() lowercase : Optional[int] = self.block_class(**SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.train() lowercase : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Any = output[0] lowercase : int = torch.device(SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = randn_tensor(output.shape , device=SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) loss.backward()
319
0
"""Lazy import structure for XLM-RoBERTa-XL.

Heavy torch-backed submodules are registered in an import-structure mapping
and only imported on first attribute access via ``_LazyModule``.

BUGFIX: the original rebound ``__A`` (the import-structure dict) to the
modeling-class *list*, discarding the configuration entries; referenced an
undefined ``_import_structure`` in the final ``_LazyModule`` call; and
imported ``sys`` without using it (the lazy module was never installed in
``sys.modules``).
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names it exports; consumed by ``_LazyModule``.
__A = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    # Modeling code requires torch; skip its registration when unavailable.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Add (do not replace!) the torch-only exports under their submodule key.
    __A["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], __A)
334
import argparse import os import re __A : List[Any] = "src/diffusers" # Pattern that looks at the indentation in a line. __A : Dict = re.compile(r"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. __A : Optional[int] = re.compile(r"^\s*\"([^\"]+)\":") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __A : Union[str, Any] = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]") # Pattern that matches `"key",` and puts `key` in group 0. __A : List[Any] = re.compile(r"^\s*\"([^\"]+)\",\s*$") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __A : List[str] = re.compile(r"\[([^\]]+)\]") def UpperCAmelCase ( UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' __lowerCAmelCase = _re_indent.search(UpperCamelCase__ ) return "" if search is None else search.groups()[0] def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__="" , UpperCamelCase__=None , UpperCamelCase__=None ) -> List[str]: '''simple docstring''' __lowerCAmelCase = 0 __lowerCAmelCase = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(UpperCamelCase__ ): index += 1 __lowerCAmelCase = ["""\n""".join(lines[:index] )] else: __lowerCAmelCase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). 
__lowerCAmelCase = [lines[index]] index += 1 while index < len(UpperCamelCase__ ) and (end_prompt is None or not lines[index].startswith(UpperCamelCase__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(UpperCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(UpperCamelCase__ ) ) if index < len(UpperCamelCase__ ) - 1: __lowerCAmelCase = [lines[index + 1]] index += 1 else: __lowerCAmelCase = [] else: blocks.append("""\n""".join(UpperCamelCase__ ) ) __lowerCAmelCase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(UpperCamelCase__ ) > 0: blocks.append("""\n""".join(UpperCamelCase__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(UpperCamelCase__ ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def UpperCAmelCase ( UpperCamelCase__ ) -> Dict: '''simple docstring''' def _inner(UpperCamelCase__ ): return key(UpperCamelCase__ ).lower().replace("""_""" , """""" ) return _inner def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=None ) -> Tuple: '''simple docstring''' def noop(UpperCamelCase__ ): return x if key is None: __lowerCAmelCase = noop # Constants are all uppercase, they go first. __lowerCAmelCase = [obj for obj in objects if key(UpperCamelCase__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __lowerCAmelCase = [obj for obj in objects if key(UpperCamelCase__ )[0].isupper() and not key(UpperCamelCase__ ).isupper()] # Functions begin with a lowercase, they go last. 
__lowerCAmelCase = [obj for obj in objects if not key(UpperCamelCase__ )[0].isupper()] __lowerCAmelCase = ignore_underscore(UpperCamelCase__ ) return sorted(UpperCamelCase__ , key=UpperCamelCase__ ) + sorted(UpperCamelCase__ , key=UpperCamelCase__ ) + sorted(UpperCamelCase__ , key=UpperCamelCase__ ) def UpperCAmelCase ( UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' def _replace(UpperCamelCase__ ): __lowerCAmelCase = match.groups()[0] if "," not in imports: return F'''[{imports}]''' __lowerCAmelCase = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowerCAmelCase = keys[:-1] return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCamelCase__ )] ) + "]" __lowerCAmelCase = import_statement.split("""\n""" ) if len(UpperCamelCase__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __lowerCAmelCase = 2 if lines[1].strip() == """[""" else 1 __lowerCAmelCase = [(i, _re_strip_line.search(UpperCamelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __lowerCAmelCase = sort_objects(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] ) __lowerCAmelCase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(UpperCamelCase__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __lowerCAmelCase = _re_bracket_content.sub(_replace , lines[1] ) else: __lowerCAmelCase = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: __lowerCAmelCase = keys[:-1] __lowerCAmelCase = get_indent(lines[1] ) + """, """.join([F'''"{k}"''' for k in sort_objects(UpperCamelCase__ )] ) return "\n".join(UpperCamelCase__ ) else: # Finally we have to deal with imports fitting on one line __lowerCAmelCase = _re_bracket_content.sub(_replace , UpperCamelCase__ ) return import_statement def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=True ) -> Optional[int]: '''simple docstring''' with open(UpperCamelCase__ , """r""" ) as f: __lowerCAmelCase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __lowerCAmelCase = split_code_in_indented_blocks( UpperCamelCase__ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(UpperCamelCase__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __lowerCAmelCase = main_blocks[block_idx] __lowerCAmelCase = block.split("""\n""" ) # Get to the start of the imports. __lowerCAmelCase = 0 while line_idx < len(UpperCamelCase__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __lowerCAmelCase = len(UpperCamelCase__ ) else: line_idx += 1 if line_idx >= len(UpperCamelCase__ ): continue # Ignore beginning and last line: they don't contain anything. __lowerCAmelCase = """\n""".join(block_lines[line_idx:-1] ) __lowerCAmelCase = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __lowerCAmelCase = split_code_in_indented_blocks(UpperCamelCase__ , indent_level=UpperCamelCase__ ) # We have two categories of import key: list or _import_structure[key].append/extend __lowerCAmelCase = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. 
__lowerCAmelCase = [(pattern.search(UpperCamelCase__ ).groups()[0] if pattern.search(UpperCamelCase__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __lowerCAmelCase = [(i, key) for i, key in enumerate(UpperCamelCase__ ) if key is not None] __lowerCAmelCase = [x[0] for x in sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __lowerCAmelCase = 0 __lowerCAmelCase = [] for i in range(len(UpperCamelCase__ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: __lowerCAmelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(UpperCamelCase__ ) count += 1 # And we put our main block back together with its first and last line. __lowerCAmelCase = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(UpperCamelCase__ ): if check_only: return True else: print(F'''Overwriting {file}.''' ) with open(UpperCamelCase__ , """w""" ) as f: f.write("""\n""".join(UpperCamelCase__ ) ) def UpperCAmelCase ( UpperCamelCase__=True ) -> Tuple: '''simple docstring''' __lowerCAmelCase = [] for root, _, files in os.walk(UpperCamelCase__ ): if "__init__.py" in files: __lowerCAmelCase = sort_imports(os.path.join(UpperCamelCase__ , """__init__.py""" ) , check_only=UpperCamelCase__ ) if result: __lowerCAmelCase = [os.path.join(UpperCamelCase__ , """__init__.py""" )] if len(UpperCamelCase__ ) > 0: raise ValueError(F'''Would overwrite {len(UpperCamelCase__ )} files, run `make style`.''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") __A : Any = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
334
1
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class __UpperCAmelCase ( _lowerCamelCase ): '''simple docstring''' lowercase : Union[List[PIL.Image.Image], np.ndarray] lowercase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
255
"""simple docstring""" import string def _UpperCamelCase ( A ): UpperCamelCase_ ="" for i in sequence: UpperCamelCase_ =ord(A ) if 65 <= extract <= 90: output += chr(155 - extract ) elif 97 <= extract <= 122: output += chr(219 - extract ) else: output += i return output def _UpperCamelCase ( A ): UpperCamelCase_ =string.ascii_letters UpperCamelCase_ =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(A )] if c in letters else c for c in sequence ) def _UpperCamelCase ( ): from timeit import timeit print("Running performance benchmarks..." ) UpperCamelCase_ ="from string import printable ; from __main__ import atbash, atbash_slow" print(f"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=A )} seconds""" ) print(f"""> atbash(): {timeit("atbash(printable)" , setup=A )} seconds""" ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(f'''{example} encrypted in atbash: {atbash(example)}''') benchmark()
391
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase = { """vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""}, """tokenizer_file""": { """mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json""" }, } UpperCAmelCase = {"""mobilebert-uncased""": 512} UpperCAmelCase = {} class lowerCAmelCase_ ( lowerCamelCase__ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_INIT_CONFIGURATION __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = MobileBertTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCAmelCase ) != tokenize_chinese_chars ): snake_case_ = 
getattr(_UpperCAmelCase , normalizer_state.pop('''type''' ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**_UpperCAmelCase ) snake_case_ = do_lower_case def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=None ): snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None ): snake_case_ = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase )
531
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase = {"""UserAgent""": UserAgent().random} def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> dict: """simple docstring""" snake_case_ = script.contents[0] snake_case_ = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class lowerCAmelCase_ : '''simple docstring''' def __init__( self , _UpperCAmelCase ): snake_case_ = F'''https://www.instagram.com/{username}/''' snake_case_ = self.get_json() def UpperCamelCase__ ( self ): snake_case_ = requests.get(self.url , headers=_UpperCAmelCase ).text snake_case_ = BeautifulSoup(_UpperCAmelCase , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self ): return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self ): return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def UpperCamelCase__ ( self ): return self.user_data["username"] @property def UpperCamelCase__ ( self ): return self.user_data["full_name"] @property def UpperCamelCase__ ( self ): return self.user_data["biography"] @property def UpperCamelCase__ ( self ): return self.user_data["business_email"] @property def UpperCamelCase__ ( self ): return self.user_data["external_url"] @property def UpperCamelCase__ ( self ): return self.user_data["edge_followed_by"]["count"] @property def UpperCamelCase__ ( self ): return self.user_data["edge_follow"]["count"] @property def UpperCamelCase__ ( self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def UpperCamelCase__ ( self ): return self.user_data["profile_pic_url_hd"] @property def UpperCamelCase__ ( self ): return self.user_data["is_verified"] @property def UpperCamelCase__ ( self ): return 
self.user_data["is_private"] def __lowerCAmelCase (SCREAMING_SNAKE_CASE = "github" )-> None: """simple docstring""" import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions snake_case_ = InstagramUser(SCREAMING_SNAKE_CASE ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , SCREAMING_SNAKE_CASE ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 12_0000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
531
1
"""simple docstring""" import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = { '''artists_file''': '''artists.json''', '''lyrics_file''': '''lyrics.json''', '''genres_file''': '''genres.json''', } SCREAMING_SNAKE_CASE : str = { '''artists_file''': { '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''', }, '''genres_file''': { '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''', }, '''lyrics_file''': { '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''', }, } SCREAMING_SNAKE_CASE : List[Any] = { '''jukebox''': 5_1_2, } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_LYRIC_TOKENS_SIZES __UpperCamelCase = ['input_ids', 'attention_mask'] def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=["v3", "v2", "v2"] , lowerCamelCase=512 , lowerCamelCase=5 , lowerCamelCase="<|endoftext|>" , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token super().__init__( unk_token=lowerCamelCase , n_genres=lowerCamelCase , version=lowerCamelCase , max_n_lyric_tokens=lowerCamelCase , **lowerCamelCase , ) _lowerCAmelCase = version _lowerCAmelCase = max_n_lyric_tokens _lowerCAmelCase = n_genres with open(lowerCamelCase , encoding="""utf-8""" ) as 
vocab_handle: _lowerCAmelCase = json.load(lowerCamelCase ) with open(lowerCamelCase , encoding="""utf-8""" ) as vocab_handle: _lowerCAmelCase = json.load(lowerCamelCase ) with open(lowerCamelCase , encoding="""utf-8""" ) as vocab_handle: _lowerCAmelCase = json.load(lowerCamelCase ) _lowerCAmelCase = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. if len(self.lyrics_encoder ) == 79: _lowerCAmelCase = oov.replace(R"""\-'""" , R"""\-+'""" ) _lowerCAmelCase = regex.compile(lowerCamelCase ) _lowerCAmelCase = {v: k for k, v in self.artists_encoder.items()} _lowerCAmelCase = {v: k for k, v in self.genres_encoder.items()} _lowerCAmelCase = {v: k for k, v in self.lyrics_encoder.items()} @property def A__ (self ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def A__ (self ): '''simple docstring''' return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = [self.artists_encoder.get(lowerCamelCase , 0 ) for artist in list_artists] for genres in range(len(lowerCamelCase ) ): _lowerCAmelCase = [self.genres_encoder.get(lowerCamelCase , 0 ) for genre in list_genres[genres]] _lowerCAmelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) _lowerCAmelCase = [[self.lyrics_encoder.get(lowerCamelCase , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def A__ (self , lowerCamelCase ): '''simple docstring''' return list(lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.prepare_for_tokenization(lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = self._tokenize(lowerCamelCase ) return artist, genre, 
lyrics def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": _lowerCAmelCase = artists[idx].lower() _lowerCAmelCase = [genres[idx].lower()] else: _lowerCAmelCase = self._normalize(artists[idx] ) + """.v2""" _lowerCAmelCase = [ self._normalize(lowerCamelCase ) + """.v2""" for genre in genres[idx].split("""_""" ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": _lowerCAmelCase = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" ) _lowerCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n""" _lowerCAmelCase = {vocab[index]: index + 1 for index in range(len(lowerCamelCase ) )} _lowerCAmelCase = 0 _lowerCAmelCase = len(lowerCamelCase ) + 1 _lowerCAmelCase = self.vocab _lowerCAmelCase = {v: k for k, v in self.vocab.items()} _lowerCAmelCase = """""" else: _lowerCAmelCase = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" ) _lowerCAmelCase = self._run_strip_accents(lowerCamelCase ) _lowerCAmelCase = lyrics.replace("""\\""" , """\n""" ) _lowerCAmelCase = self.out_of_vocab.sub("""""" , lowerCamelCase ), [], [] return artists, genres, lyrics def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = unicodedata.normalize("""NFD""" , lowerCamelCase ) _lowerCAmelCase = [] for char in text: _lowerCAmelCase = unicodedata.category(lowerCamelCase ) if cat == "Mn": continue output.append(lowerCamelCase ) return "".join(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = ( [chr(lowerCamelCase ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )] + [chr(lowerCamelCase ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )] + [chr(lowerCamelCase ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )] + ["""."""] ) _lowerCAmelCase = frozenset(lowerCamelCase ) _lowerCAmelCase = re.compile(R"""_+""" ) 
_lowerCAmelCase = """""".join([c if c in accepted else """_""" for c in text.lower()] ) _lowerCAmelCase = pattern.sub("""_""" , lowerCamelCase ).strip("""_""" ) return text def A__ (self , lowerCamelCase ): '''simple docstring''' return " ".join(lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ): '''simple docstring''' if not isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = TensorType(lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" ) import tensorflow as tf _lowerCAmelCase = tf.constant _lowerCAmelCase = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" ) import torch _lowerCAmelCase = torch.tensor _lowerCAmelCase = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" ) import jax.numpy as jnp # noqa: F811 _lowerCAmelCase = jnp.array _lowerCAmelCase = _is_jax else: _lowerCAmelCase = np.asarray _lowerCAmelCase = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: _lowerCAmelCase = [inputs] if not is_tensor(lowerCamelCase ): _lowerCAmelCase = as_tensor(lowerCamelCase ) except: # noqa E722 raise ValueError( """Unable to create tensor, you should probably activate truncation and/or padding """ """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" ) return inputs def __call__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase="" , lowerCamelCase="pt" ): '''simple docstring''' _lowerCAmelCase = [0, 0, 0] _lowerCAmelCase = [artist] * len(self.version ) _lowerCAmelCase = [genres] * len(self.version ) 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.tokenize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._convert_token_to_id(lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = [-INFINITY] * len(full_tokens[-1] ) _lowerCAmelCase = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} ) def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowerCAmelCase = os.path.join( lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=lowerCamelCase ) ) _lowerCAmelCase = os.path.join( lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=lowerCamelCase ) ) _lowerCAmelCase = os.path.join( lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.artists_decoder.get(lowerCamelCase ) _lowerCAmelCase = [self.genres_decoder.get(lowerCamelCase ) for genre in genres_index] _lowerCAmelCase = [self.lyrics_decoder.get(lowerCamelCase ) for 
character in lyric_index] return artist, genres, lyrics
156
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '''--original_config_file''', default=None, type=str, help='''The YAML config file corresponding to the original architecture.''', ) parser.add_argument( '''--num_in_channels''', default=None, type=int, help='''The number of input channels. If `None` number of input channels will be automatically inferred.''', ) parser.add_argument( '''--scheduler_type''', default='''pndm''', type=str, help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''', ) parser.add_argument( '''--pipeline_type''', default=None, type=str, help=( '''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'''' '''. If `None` pipeline will be automatically inferred.''' ), ) parser.add_argument( '''--image_size''', default=None, type=int, help=( '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2''' ''' Base. Use 768 for Stable Diffusion v2.''' ), ) parser.add_argument( '''--prediction_type''', default=None, type=str, help=( '''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable''' ''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.''' ), ) parser.add_argument( '''--extract_ema''', action='''store_true''', help=( '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights''' ''' or not. Defaults to `False`. 
Add `--extract_ema` to extract the EMA weights. EMA weights usually yield''' ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.''' ), ) parser.add_argument( '''--upcast_attention''', action='''store_true''', help=( '''Whether the attention computation should always be upcasted. This is necessary when running stable''' ''' diffusion 2.1.''' ), ) parser.add_argument( '''--from_safetensors''', action='''store_true''', help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''', ) parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''') parser.add_argument( '''--stable_unclip''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''', ) parser.add_argument( '''--stable_unclip_prior''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''', ) parser.add_argument( '''--clip_stats_path''', type=str, help='''Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''', required=False, ) parser.add_argument( '''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.''' ) parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''') parser.add_argument( '''--vae_path''', type=str, default=None, required=False, help='''Set to a path, hub id to an already converted vae to not convert it again.''', ) SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE : str = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
156
1
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Union[str, Any]: lowerCamelCase__ : Dict = int(number**0.5 ) return number == sq * sq def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCamelCase__ : str = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den lowerCamelCase__ : Optional[Any] = x_den * y_den * z_den lowerCamelCase__ : Tuple = gcd(lowerCamelCase_ , lowerCamelCase_ ) top //= hcf bottom //= hcf return top, bottom def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 35 ) -> Optional[Any]: lowerCamelCase__ : List[Any] = set() lowerCamelCase__ : Tuple = 42 lowerCamelCase__ : List[str] = Fraction(0 ) lowerCamelCase__ : Optional[Any] = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 lowerCamelCase__ : Dict = x_num * y_den + x_den * y_num lowerCamelCase__ : Tuple = x_den * y_den lowerCamelCase__ : str = gcd(lowerCamelCase_ , lowerCamelCase_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowerCamelCase__ : Optional[int] = add_three( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) unique_s.add(lowerCamelCase_ ) # n=2 lowerCamelCase__ : Tuple = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) lowerCamelCase__ : Any = x_den * x_den * y_den * y_den if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ): lowerCamelCase__ : Tuple = int(sqrt(lowerCamelCase_ ) ) lowerCamelCase__ : Optional[int] = int(sqrt(lowerCamelCase_ ) ) lowerCamelCase__ : Any = gcd(lowerCamelCase_ , lowerCamelCase_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowerCamelCase__ : Any = add_three( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) unique_s.add(lowerCamelCase_ ) # n=-1 lowerCamelCase__ : int = x_num * y_num lowerCamelCase__ : Any = x_den * y_num + x_num * y_den lowerCamelCase__ : Any = gcd(lowerCamelCase_ , lowerCamelCase_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowerCamelCase__ : List[str] = add_three( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) unique_s.add(lowerCamelCase_ ) # n=2 lowerCamelCase__ : Optional[int] = x_num * x_num * y_num * y_num lowerCamelCase__ : int = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ): lowerCamelCase__ : List[str] = int(sqrt(lowerCamelCase_ ) ) lowerCamelCase__ : Optional[Any] = int(sqrt(lowerCamelCase_ ) ) lowerCamelCase__ : Tuple = gcd(lowerCamelCase_ , lowerCamelCase_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowerCamelCase__ : int = add_three( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) unique_s.add(lowerCamelCase_ ) for num, den in unique_s: total += Fraction(lowerCamelCase_ , lowerCamelCase_ ) return total.denominator + total.numerator if __name__ == "__main__": print(F'{solution() = }')
712
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _A : Dict ={ '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''], '''processing_git''': ['''GitProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Union[str, Any] =[ '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GitForCausalLM''', '''GitModel''', '''GitPreTrainedModel''', '''GitVisionModel''', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
0
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( snake_case_ ): lowerCAmelCase__ = ['image_processor', 'tokenizer'] lowerCAmelCase__ = 'AutoImageProcessor' lowerCAmelCase__ = 'AutoTokenizer' def __init__( self , lowercase , lowercase ) -> List[str]: super().__init__(lowercase , lowercase ) lowerCamelCase_ = self.image_processor def __call__( self , lowercase=None , lowercase=None , lowercase=None , **lowercase ) -> Optional[Any]: if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: lowerCamelCase_ = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase ) if images is not None: lowerCamelCase_ = self.image_processor(lowercase , return_tensors=lowercase , **lowercase ) if text is not None and images is not None: lowerCamelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase ) def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> List[str]: return self.tokenizer.batch_decode(*lowercase , **lowercase ) def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> int: return self.tokenizer.decode(*lowercase , **lowercase ) @property def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: return ["input_ids", "attention_mask", "pixel_values"]
463
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class _SCREAMING_SNAKE_CASE : def __init__( self , lowercase , lowercase=99 , lowercase=13 , lowercase=7 , lowercase=9 , lowercase=True , lowercase=True , lowercase=False , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase=8 , lowercase=0.1 , lowercase=0.0_0_2 , lowercase=1 , lowercase=0 , lowercase=0 , lowercase=None , lowercase=None , ) -> Tuple: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = encoder_seq_length lowerCamelCase_ = decoder_seq_length # For common tests lowerCamelCase_ = self.decoder_seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_attention_mask lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = d_ff lowerCamelCase_ = relative_attention_num_buckets lowerCamelCase_ = dropout_rate lowerCamelCase_ = initializer_factor lowerCamelCase_ = eos_token_id lowerCamelCase_ = pad_token_id lowerCamelCase_ = decoder_start_token_id lowerCamelCase_ = None lowerCamelCase_ = decoder_layers def SCREAMING_SNAKE_CASE_( self ) -> List[str]: return TaConfig.from_pretrained("google/umt5-base" ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ) -> str: if attention_mask is None: lowerCamelCase_ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: 
lowerCamelCase_ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowerCamelCase_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowercase ) if decoder_head_mask is None: lowerCamelCase_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowercase ) if cross_attn_head_mask is None: lowerCamelCase_ = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=lowercase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input lowerCamelCase_ = input_ids.clamp(self.pad_token_id + 1 ) lowerCamelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 ) lowerCamelCase_ = self.get_config() lowerCamelCase_ = config.num_attention_heads lowerCamelCase_ = self.prepare_inputs_dict(lowercase , lowercase , lowercase ) return config, input_dict def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ , lowerCamelCase_ = self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE_( self ) -> List[str]: return TaConfig( vocab_size=166 , 
d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def SCREAMING_SNAKE_CASE_( self ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]: lowerCamelCase_ = UMTaModel(config=lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model( input_ids=lowercase , decoder_input_ids=lowercase , attention_mask=lowercase , decoder_attention_mask=lowercase , ) lowerCamelCase_ = model(input_ids=lowercase , decoder_input_ids=lowercase ) lowerCamelCase_ = result.last_hidden_state lowerCamelCase_ = result.past_key_values lowerCamelCase_ = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past 
self.parent.assertEqual(len(lowercase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[int]: lowerCamelCase_ = UMTaModel(config=lowercase ).get_decoder().to(lowercase ).eval() # first forward pass lowerCamelCase_ = model(lowercase , use_cache=lowercase ) lowerCamelCase_ = model(lowercase ) lowerCamelCase_ = model(lowercase , use_cache=lowercase ) self.parent.assertTrue(len(lowercase ) == len(lowercase ) ) self.parent.assertTrue(len(lowercase ) == len(lowercase ) + 1 ) lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase_ = model(lowercase )["last_hidden_state"] lowerCamelCase_ = model(lowercase , past_key_values=lowercase )["last_hidden_state"] # select random slice lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach() lowerCamelCase_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , ) -> Tuple: lowerCamelCase_ = UMTaModel(config=lowercase ).to(lowercase ).half().eval() lowerCamelCase_ = model(**lowercase )["last_hidden_state"] self.parent.assertFalse(torch.isnan(lowercase ).any().item() ) @require_torch class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): lowerCAmelCase__ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if 
is_torch_available() else () ) lowerCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else () lowerCAmelCase__ = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = True lowerCAmelCase__ = True # The small UMT5 model needs higher percentages for CPU/MP tests lowerCAmelCase__ = [0.8, 0.9] def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() lowerCamelCase_ = UMTaModel(config_and_inputs[0] ).to(lowercase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( lowercase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=lowercase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = ["encoder_attentions", "decoder_attentions", "cross_attentions"] lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() lowerCamelCase_ = config_and_inputs[0] lowerCamelCase_ = UMTaForConditionalGeneration(lowercase ).eval() model.to(lowercase ) lowerCamelCase_ = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowercase ), "decoder_head_mask": torch.zeros(config.num_decoder_layers , 
config.num_heads , device=lowercase ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase ), } for attn_name, (name, mask) in zip(lowercase , head_masking.items() ): lowerCamelCase_ = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": lowerCamelCase_ = torch.ones( config.num_decoder_layers , config.num_heads , device=lowercase ) lowerCamelCase_ = model.generate( config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowercase , return_dict_in_generate=lowercase , **lowercase , ) # We check the state of decoder_attentions and cross_attentions just from the last step lowerCamelCase_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." ) def SCREAMING_SNAKE_CASE_( self ) -> Tuple: pass @require_torch @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def SCREAMING_SNAKE_CASE_( self ) -> int: lowerCamelCase_ = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowercase ).to(lowercase ) lowerCamelCase_ = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowercase , legacy=lowercase ) lowerCamelCase_ = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] lowerCamelCase_ = tokenizer(lowercase , return_tensors="pt" , padding=lowercase ).input_ids # fmt: off lowerCamelCase_ = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(lowercase , lowercase ) lowerCamelCase_ = model.generate(input_ids.to(lowercase ) ) lowerCamelCase_ = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. 
We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] lowerCamelCase_ = tokenizer.batch_decode(lowercase ) self.assertEqual(lowercase , lowercase )
463
1
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCAmelCase_ ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "sigmoid" snake_case__ : List[str] = "softmax" snake_case__ : str = "none" @add_end_docstrings( UpperCamelCase , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. 
If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Dict = False snake_case__ : Optional[Any] = ClassificationFunction.NONE def __init__( self : Tuple , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: super().__init__(**UpperCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : str="" , **UpperCAmelCase__ : int ) -> Union[str, Any]: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" __SCREAMING_SNAKE_CASE = tokenizer_kwargs __SCREAMING_SNAKE_CASE = {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: __SCREAMING_SNAKE_CASE = self.model.config.return_all_scores if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or top_k is None: __SCREAMING_SNAKE_CASE = top_k __SCREAMING_SNAKE_CASE = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." 
, UpperCAmelCase__ , ) if return_all_scores: __SCREAMING_SNAKE_CASE = None else: __SCREAMING_SNAKE_CASE = 1 if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: __SCREAMING_SNAKE_CASE = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : List[Any] ) -> str: __SCREAMING_SNAKE_CASE = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. __SCREAMING_SNAKE_CASE = "top_k" not in kwargs if isinstance(args[0] , UpperCAmelCase__ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : str , **UpperCAmelCase__ : Any ) -> Dict[str, GenericTensor]: __SCREAMING_SNAKE_CASE = self.framework if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return self.tokenizer(**UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) == 1 and isinstance(inputs[0] , UpperCAmelCase__ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." 
) return self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int ) -> int: return self.model(**UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : Any=True ) -> Dict: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: __SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: __SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: __SCREAMING_SNAKE_CASE = self.model.config.function_to_apply else: __SCREAMING_SNAKE_CASE = ClassificationFunction.NONE __SCREAMING_SNAKE_CASE = model_outputs["logits"][0] __SCREAMING_SNAKE_CASE = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: __SCREAMING_SNAKE_CASE = sigmoid(UpperCAmelCase__ ) elif function_to_apply == ClassificationFunction.SOFTMAX: __SCREAMING_SNAKE_CASE = softmax(UpperCAmelCase__ ) elif function_to_apply == ClassificationFunction.NONE: __SCREAMING_SNAKE_CASE = outputs else: raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} __SCREAMING_SNAKE_CASE = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(UpperCAmelCase__ ) ] if not _legacy: 
dict_scores.sort(key=lambda UpperCAmelCase__ : x["score"] , reverse=UpperCAmelCase__ ) if top_k is not None: __SCREAMING_SNAKE_CASE = dict_scores[:top_k] return dict_scores
703
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): '''simple docstring''' if attention_mask is None: __SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowerCAmelCase_ ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCAmelCase_ ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCAmelCase_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class UpperCamelCase_ : """simple docstring""" def __init__( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=1_3 , UpperCAmelCase__ : List[str]=7 
, UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Optional[int]="relu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : List[str]=2_0 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Union[str, Any]=0 , ) -> Any: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = decoder_layerdrop __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.eos_token_id # Eos Token __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in 
incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 ) __SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 ) __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return config, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = MaMaaaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval() __SCREAMING_SNAKE_CASE = inputs_dict["input_ids"] __SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"] __SCREAMING_SNAKE_CASE = inputs_dict["head_mask"] # first forward pass __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids 
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[ "last_hidden_state" ] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = MaMaaaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval() __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = outputs.encoder_last_hidden_state __SCREAMING_SNAKE_CASE = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = model.get_encoder() encoder.save_pretrained(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = MaMaaaEncoder.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = model.get_decoder() 
decoder.save_pretrained(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = MaMaaaDecoder.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) snake_case__ : Tuple = (MaMaaaForConditionalGeneration,) if is_torch_available() else () snake_case__ : Dict = ( { "conversational": MaMaaaForConditionalGeneration, "feature-extraction": MaMaaaModel, "summarization": MaMaaaForConditionalGeneration, "text2text-generation": MaMaaaForConditionalGeneration, "translation": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) snake_case__ : Optional[int] = True snake_case__ : Optional[Any] = True snake_case__ : Union[str, Any] = False snake_case__ : Tuple = False def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict ) -> Optional[Any]: if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def UpperCAmelCase_ ( self : Dict ) -> List[str]: __SCREAMING_SNAKE_CASE = MaMaaaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : Dict ) -> Tuple: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_class.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ ) self.assertEqual(info["missing_keys"] , [] ) def UpperCAmelCase_ ( self : Tuple ) -> Tuple: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str ) -> Optional[int]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): __SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = copy.deepcopy(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) if not self.is_encoder_decoder: __SCREAMING_SNAKE_CASE = inputs["input_ids"] del inputs["input_ids"] else: __SCREAMING_SNAKE_CASE = inputs["input_ids"] __SCREAMING_SNAKE_CASE = inputs.get("decoder_input_ids" , UpperCAmelCase__ ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 
model.get_input_embeddings() if not self.is_encoder_decoder: __SCREAMING_SNAKE_CASE = wte(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = wte(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = wte(UpperCAmelCase__ ) with torch.no_grad(): model(**UpperCAmelCase__ )[0] def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(UpperCAmelCase__ ).eval().to(UpperCAmelCase__ ) if torch_device == "cuda": model.half() model.generate(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) model.generate(num_beams=4 , do_sample=UpperCAmelCase__ , early_stopping=UpperCAmelCase__ , num_return_sequences=3 ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return torch.tensor(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_ ) a__ : Union[str, Any] = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @cached_property def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def UpperCAmelCase_ ( self : Dict ) -> List[Any]: __SCREAMING_SNAKE_CASE = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) __SCREAMING_SNAKE_CASE = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) __SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 1_0_2_4) ) self.assertEqual(output.shape , 
UpperCAmelCase__ ) # change to expected output here __SCREAMING_SNAKE_CASE = torch.tensor( [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=UpperCAmelCase__ ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) ) def UpperCAmelCase_ ( self : Tuple ) -> str: __SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase__ ) # change to intended input __SCREAMING_SNAKE_CASE = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) __SCREAMING_SNAKE_CASE = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) __SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, model.config.vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase__ ) # change to expected output here __SCREAMING_SNAKE_CASE = torch.tensor( [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=UpperCAmelCase__ ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) ) def UpperCAmelCase_ ( self : Tuple ) -> Any: __SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) __SCREAMING_SNAKE_CASE = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une 
vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors="pt" ) __SCREAMING_SNAKE_CASE = model.generate( input_ids=dct["input_ids"].to(UpperCAmelCase__ ) , attention_mask=dct["attention_mask"].to(UpperCAmelCase__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) __SCREAMING_SNAKE_CASE = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] __SCREAMING_SNAKE_CASE = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) assert generated == expected_en
553
0
"""Tests for the Flax RegNet model (``FlaxRegNetModel`` /
``FlaxRegNetForImageClassification``).

NOTE(review): identifiers were mangled in the original (all methods named
``__snake_case``, duplicate parameter names — a SyntaxError; the tester class
was not named ``FlaxRegNetModelTester`` although ``setUp`` instantiates it by
that name).  Names below are reconstructed from the call sites; confirm
against the upstream test file.
"""
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    """Builds tiny RegNet configs/inputs and implements the per-model checks."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one stage per entry in hidden_sizes; used by test_hidden_states_output
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w): spatial dims are downsampled 32x overall.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common config properties are not applicable to RegNet; nothing to do.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    """Load the standard COCO cats fixture used by vision integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1_000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
668
"""Viterbi algorithm: most-likely hidden-state path of a discrete HMM.

NOTE(review): in the original all eight functions collided on the name
``lowerCAmelCase_`` with duplicate parameter names (a SyntaxError); names
below are reconstructed from the surviving references (``_validation``,
``_object``, ``result`` …).
"""
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations.

    Args:
        observations_space: ordered list of observed symbols (strings).
        states_space: list of hidden state names (strings).
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> {state -> P(transition)}.
        emission_probabilities: state -> {observation -> P(emission)}.

    Raises:
        ValueError: if any argument is empty or of the wrong shape/type.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    # probabilities[(state, observation)] = best path probability ending in
    # `state` at the time of `observation`; pointers holds the backtrack links.
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all viterbi() arguments, raising ValueError on the first problem."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Reject any falsy (empty) parameter."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Require `_object` to be a list of strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Require a dict of dicts whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Require a dict with string keys and `value_type` values."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
668
1
"""Convert a RoBERTa-PreLayerNorm checkpoint from the original repo layout
into a standard ``transformers`` ``RobertaPreLayerNormForMaskedLM`` dump.

NOTE(review): the original function/constant names collided after mangling
(the ``__main__`` block calls ``convert_roberta_prelayernorm_checkpoint_to_pytorch``
but the def was renamed); names restored from the call site.
"""
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    """Download, rename, filter, and re-save the checkpoint.

    Args:
        checkpoint_repo: Hub repo id of the original dump,
            e.g. ``andreasmadsen/efficient_mlm_m0.40``.
        pytorch_dump_folder_path: output folder for the converted model and
            its tokenizer.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
129
"""Benchmark of `datasets` ``Dataset.map`` / ``Dataset.filter`` throughput;
results are written next to this script as JSON.

NOTE(review): the original's three functions all collided on one mangled name
while the call sites reference ``map``/``filter``/``benchmark_map_filter``,
and every timing result was assigned to a throwaway local instead of the
``times`` dict that gets serialized.  Function names and result keys below are
reconstructed — confirm the key strings against the upstream benchmark.
"""
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset, **kwargs):  # noqa: A001 — deliberately shadows the builtin inside this script
    """Time one `Dataset.map` call (the mapped dataset itself is discarded)."""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):  # noqa: A001 — deliberately shadows the builtin inside this script
    """Time one `Dataset.filter` call (the filtered dataset itself is discarded)."""
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """Run the full suite of map/filter timings and dump them as JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
129
1
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1].

    The fraction of points landing inside the unit circle approaches pi/4.
    Prints the estimate; returns nothing.

    Bug fix: all four functions in this module had been (re)defined under one
    obfuscated name while call sites used the real names, so the module
    raised NameError at runtime; distinct names are restored.
    """

    def is_in_circle(x: float, y: float) -> bool:
        # Our circle has a radius of 1, so a distance greater than 1
        # from the centre lands outside the circle.
        distance_from_centre = sqrt((x**2) + (y**2))
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of ``function_to_integrate``
    over [min_value, max_value]: mean sample value times interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator on y = x, whose exact integral is known."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


# Backward-compatible alias: the obfuscated module exported its last
# definition under this name.
lowerCAmelCase_ = pi_estimator_using_area_under_curve


if __name__ == "__main__":
    import doctest

    doctest.testmod()
21
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1].

    The fraction of points landing inside the unit circle approaches pi/4.
    Prints the estimate; returns nothing.

    Bug fix: all four functions in this module had been (re)defined under one
    obfuscated name while call sites used the real names, so the module
    raised NameError at runtime; distinct names are restored.
    """

    def is_in_circle(x: float, y: float) -> bool:
        # Our circle has a radius of 1, so a distance greater than 1
        # from the centre lands outside the circle.
        distance_from_centre = sqrt((x**2) + (y**2))
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of ``function_to_integrate``
    over [min_value, max_value]: mean sample value times interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator on y = x, whose exact integral is known."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


# Backward-compatible alias: the obfuscated module exported its last
# definition under this name.
lowerCAmelCase_ = pi_estimator_using_area_under_curve


if __name__ == "__main__":
    import doctest

    doctest.testmod()
21
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if number < 0: raise ValueError('''number must not be negative''' ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
706
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
215
0
import heapq


def lowerCAmelCase_(files: list) -> float:
    """Return the minimum total cost of merging all ``files`` into one.

    Classic optimal merge pattern: repeatedly merge the two cheapest files;
    the cost of a merge is the sum of the two sizes, and the merged file
    goes back into the pool.

    Bug fix: the obfuscated body mixed two different names for the parameter
    and passed the wrong arguments to ``pop``/``append`` (NameError/garbage).
    Improvements: a min-heap replaces the original repeated linear scans
    (O(n log n) instead of O(n^2)), and the caller's list is no longer
    mutated.

    >>> lowerCAmelCase_([2, 3, 4])
    14
    >>> lowerCAmelCase_([8])
    0
    """
    if len(files) <= 1:
        return 0
    heap = list(files)  # work on a copy so the caller's list is untouched
    heapq.heapify(heap)
    optimal_merge_cost = 0
    while len(heap) > 1:
        # Merge the two cheapest files; the merged file re-enters the pool.
        temp = heapq.heappop(heap) + heapq.heappop(heap)
        optimal_merge_cost += temp
        heapq.heappush(heap, temp)
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
136
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class snake_case__ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" a__ : Union[str, Any] = 1 a__ : str = 3 a__ : Optional[int] = (3_2, 3_2) a__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowercase ) return image @property def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) a__ : List[str] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , ) return model @property def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) a__ : Optional[Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) a__ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , 
layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(__lowercase ) @property def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" def extract(*__lowercase , **__lowercase ): class snake_case__ : """simple docstring""" def __init__( self ) -> Any: """simple docstring""" a__ : Optional[Any] = torch.ones([0] ) def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]: """simple docstring""" self.pixel_values.to(__lowercase ) return self return Out() return extract def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator a__ : List[Any] = self.dummy_cond_unet a__ : Union[str, Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowercase , set_alpha_to_one=__lowercase , ) a__ : int = self.dummy_vae a__ : List[Any] = self.dummy_text_encoder a__ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk a__ : Any = StableDiffusionPipeline( unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , ) a__ : List[str] = sd_pipe.to(__lowercase ) sd_pipe.set_progress_bar_config(disable=__lowercase ) a__ : Optional[Any] = """A painting of a squirrel eating a burger""" a__ : Union[str, Any] = torch.Generator(device=__lowercase ).manual_seed(0 ) a__ : List[Any] = sd_pipe([prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) a__ : Any = output.images a__ : Tuple = torch.Generator(device=__lowercase ).manual_seed(0 ) a__ : Optional[int] = sd_pipe( [prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowercase , 
)[0] a__ : Any = image[0, -3:, -3:, -1] a__ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a__ : Any = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" a__ : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator a__ : Optional[int] = self.dummy_cond_unet a__ : Optional[Any] = PNDMScheduler(skip_prk_steps=__lowercase ) a__ : int = self.dummy_vae a__ : List[Any] = self.dummy_text_encoder a__ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk a__ : Optional[int] = StableDiffusionPipeline( unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , ) a__ : Dict = sd_pipe.to(__lowercase ) sd_pipe.set_progress_bar_config(disable=__lowercase ) a__ : Tuple = """A painting of a squirrel eating a burger""" a__ : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(0 ) a__ : List[str] = sd_pipe([prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) a__ : List[str] = output.images a__ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 ) a__ : Dict = sd_pipe( [prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowercase , )[0] a__ : Optional[Any] = image[0, -3:, -3:, -1] a__ : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a__ : Union[str, Any] = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Tuple = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowercase ) assert isinstance(__lowercase , __lowercase ) assert isinstance(pipe.scheduler , __lowercase ) assert pipe.safety_checker is None a__ : int = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__lowercase ) a__ : List[Any] = StableDiffusionPipeline.from_pretrained(__lowercase ) # sanity check that the pipeline still works assert pipe.safety_checker is None a__ : Dict = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" a__ : Optional[int] = self.dummy_cond_unet a__ : Any = PNDMScheduler(skip_prk_steps=__lowercase ) a__ : int = self.dummy_vae a__ : List[Any] = self.dummy_text_encoder a__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 a__ : Union[str, Any] = unet.half() a__ : List[str] = vae.half() a__ : List[str] = bert.half() # make sure here that pndm scheduler skips prk a__ : str = StableDiffusionPipeline( unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , ) a__ : str = sd_pipe.to(__lowercase ) sd_pipe.set_progress_bar_config(disable=__lowercase ) a__ : List[Any] = """A painting of a squirrel eating a burger""" a__ : Tuple = sd_pipe([prompt] , 
num_inference_steps=2 , output_type="""np""" ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class snake_case__ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]: """simple docstring""" a__ : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowercase ) a__ : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a__ : Any = sd_pipe.to(__lowercase ) sd_pipe.set_progress_bar_config(disable=__lowercase ) a__ : int = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) a__ : str = 4_0_0_3_6_6_0_3_4_6 a__ : Optional[Any] = 7 # without safety guidance (sld_guidance_scale = 0) a__ : Tuple = torch.manual_seed(__lowercase ) a__ : List[Any] = sd_pipe( [prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a__ : List[str] = output.images a__ : int = image[0, -3:, -3:, -1] a__ : List[Any] = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) a__ : List[Any] = torch.manual_seed(__lowercase ) a__ : Optional[Any] = sd_pipe( [prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , 
sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : Union[str, Any] = output.images a__ : List[str] = image[0, -3:, -3:, -1] a__ : List[Any] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowercase ) a__ : int = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a__ : Optional[Any] = sd_pipe.to(__lowercase ) sd_pipe.set_progress_bar_config(disable=__lowercase ) a__ : Any = """padme amidala taking a bath artwork, safe for work, no nudity""" a__ : Any = 2_7_3_4_9_7_1_7_5_5 a__ : Union[str, Any] = 7 a__ : Tuple = torch.manual_seed(__lowercase ) a__ : Optional[Any] = sd_pipe( [prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a__ : Union[str, Any] = output.images a__ : str = image[0, -3:, -3:, -1] a__ : Any = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 a__ : str = torch.manual_seed(__lowercase ) a__ : Dict = sd_pipe( [prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : str = output.images a__ : int = image[0, -3:, -3:, -1] a__ : Optional[Any] = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert 
image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" a__ : Optional[int] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) a__ : List[Any] = sd_pipe.to(__lowercase ) sd_pipe.set_progress_bar_config(disable=__lowercase ) a__ : Tuple = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.""" """ leyendecker""" ) a__ : List[Any] = 1_0_4_4_3_5_5_2_3_4 a__ : List[Any] = 1_2 a__ : Union[str, Any] = torch.manual_seed(__lowercase ) a__ : Optional[int] = sd_pipe( [prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a__ : Dict = output.images a__ : Union[str, Any] = image[0, -3:, -3:, -1] a__ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 a__ : Any = torch.manual_seed(__lowercase ) a__ : Union[str, Any] = sd_pipe( [prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : Dict = output.images a__ : List[Any] = image[0, -3:, -3:, -1] a__ : Optional[int] = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
136
1
"""simple docstring""" def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0_0_0_0 ): '''simple docstring''' snake_case_ : int = 1 snake_case_ : Optional[int] = 1 snake_case_ : Optional[Any] = {1: 1} for inputa in range(2 , lowerCAmelCase__ ): snake_case_ : Dict = 0 snake_case_ : Any = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: snake_case_ : Any = (3 * number) + 1 counter += 1 if inputa not in counters: snake_case_ : Optional[int] = counter if counter > pre_counter: snake_case_ : List[str] = inputa snake_case_ : Union[str, Any] = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
708
"""simple docstring""" import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" ) snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" ) snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id ) snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean() snake_case_ : List[str] = -(labels.shape[-1] * loss.item()) snake_case_ : Optional[int] = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
21
0
"""Project Euler 205: probability that Peter (nine 4-sided dice) beats
Colin (six 6-sided dice)."""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return a table ``t`` where ``t[s]`` counts the rolls of
    ``dice_number`` dice (faces 1..sides_number) that sum to ``s``.

    Bug fix: both functions in this module had been renamed to the same
    obfuscated identifier while call sites used the real names (NameError);
    the real names are restored.
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Return Peter's win probability, rounded to seven decimal places."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins whenever Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


# Backward-compatible alias for the obfuscated name (bound to the last def).
a__ = solution


if __name__ == "__main__":
    print(f"{solution() = }")
75
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


_lowercase = logging.get_logger(__name__)


class UpperCamelCase_(YolosImageProcessor):
    """Deprecated alias of ``YolosImageProcessor``.

    Kept only for backward compatibility; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.

    Bug fixes: the base class referenced an undefined name instead of the
    imported ``YolosImageProcessor``, and ``warnings.warn`` was passed the
    positional-args tuple as the warning category (TypeError at runtime)
    instead of ``FutureWarning``.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE_ (a__ , unittest.TestCase ): '''simple docstring''' _a = KandinskyInpaintPipeline _a = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] _a = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", "mask_image", ] _a = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] _a = False @property def _lowerCAmelCase ( self : List[str] ) ->Tuple: return 32 @property def _lowerCAmelCase ( self : int ) ->Any: return 32 @property def _lowerCAmelCase ( self : Tuple ) ->Any: return self.time_input_dim @property def _lowerCAmelCase ( self : str ) ->Tuple: return self.time_input_dim * 4 @property def _lowerCAmelCase ( self : Any ) ->List[str]: return 100 @property def _lowerCAmelCase ( self : Any ) ->List[str]: lowerCamelCase_ : int = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def _lowerCAmelCase ( self : Any ) ->Optional[int]: torch.manual_seed(0 ) lowerCamelCase_ : Any = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , 
vocab_size=1_005 , ) lowerCamelCase_ : Optional[Any] = MultilingualCLIP(__a ) lowerCamelCase_ : str = text_encoder.eval() return text_encoder @property def _lowerCAmelCase ( self : Optional[Any] ) ->List[str]: torch.manual_seed(0 ) lowerCamelCase_ : Tuple = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(**__a ) return model @property def _lowerCAmelCase ( self : Optional[Any] ) ->str: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowerCAmelCase ( self : List[str] ) ->Optional[int]: torch.manual_seed(0 ) lowerCamelCase_ : int = VQModel(**self.dummy_movq_kwargs ) return model def _lowerCAmelCase ( self : Union[str, Any] ) ->List[str]: lowerCamelCase_ : Union[str, Any] = self.dummy_text_encoder lowerCamelCase_ : Union[str, Any] = self.dummy_tokenizer lowerCamelCase_ : List[Any] = self.dummy_unet lowerCamelCase_ : Tuple = self.dummy_movq lowerCamelCase_ : Union[str, Any] = 
DDIMScheduler( num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__a , ) lowerCamelCase_ : Optional[Any] = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _lowerCAmelCase ( self : int , __a : str , __a : Dict=0 ) ->Optional[Any]: lowerCamelCase_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a ) ).to(__a ) lowerCamelCase_ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__a ) # create init_image lowerCamelCase_ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a ) lowerCamelCase_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase_ : Union[str, Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((256, 256) ) # create mask lowerCamelCase_ : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa ) lowerCamelCase_ : Tuple = 0 if str(__a ).startswith("""mps""" ): lowerCamelCase_ : Union[str, Any] = torch.manual_seed(__a ) else: lowerCamelCase_ : int = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase_ : str = { """prompt""": """horse""", """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def _lowerCAmelCase ( self : List[Any] ) ->Tuple: lowerCamelCase_ : Any = """cpu""" lowerCamelCase_ : str = self.get_dummy_components() lowerCamelCase_ : Any = self.pipeline_class(**__a ) lowerCamelCase_ : Any = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase_ : Optional[int] = pipe(**self.get_dummy_inputs(__a ) ) 
lowerCamelCase_ : Union[str, Any] = output.images lowerCamelCase_ : Optional[int] = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] lowerCamelCase_ : Optional[Any] = image[0, -3:, -3:, -1] lowerCamelCase_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) lowerCamelCase_ : Dict = np.array( [0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def _lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ (unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self : Tuple ) ->Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Optional[int] ) ->Any: lowerCamelCase_ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" ) lowerCamelCase_ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) lowerCamelCase_ : Tuple = np.ones((768, 768) , dtype=np.floataa ) lowerCamelCase_ : Tuple = 0 lowerCamelCase_ : Any = """a hat""" lowerCamelCase_ : int = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__a ) lowerCamelCase_ : int = KandinskyInpaintPipeline.from_pretrained( 
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa ) lowerCamelCase_ : Optional[Any] = pipeline.to(__a ) pipeline.set_progress_bar_config(disable=__a ) lowerCamelCase_ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCamelCase_, lowerCamelCase_ : List[Any] = pipe_prior( __a , generator=__a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() lowerCamelCase_ : str = pipeline( __a , image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , ) lowerCamelCase_ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__a , __a )
171
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec filesystem over the files of a Hugging Face Hub repository.

    The file listing is built lazily from ``repo_info.siblings``; opening a file
    streams it over HTTP via ``hf_hub_url``.
    """

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        """
        Args:
            repo_info: metadata of the repository to expose (``DatasetInfo``).
            token: optional authentication token forwarded on every download.
        """
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        # Lazily-built mapping: path -> fsspec-style info dict. None = not built yet.
        self.dir_cache = None

    def _get_dirs(self):
        """Populate ``self.dir_cache`` from the repo siblings (files + implied parent dirs)."""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # Register every ancestor directory of the file (excluding the root ".").
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        """Open ``path`` for reading by streaming it from the Hub."""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},  # respect HTTP_PROXY and friends
        ).open()

    def info(self, path: str, **kwargs):
        """Return the info dict for ``path``; raise FileNotFoundError if unknown."""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """List the direct children of ``path`` (info dicts if ``detail`` else sorted names)."""
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)


# Backward-compatible alias for the previous (obfuscated) class name.
SCREAMING_SNAKE_CASE_ = HfFileSystem
171
1
"""Tests for the ONNX Stable Diffusion img2img pipeline (fast CPU tests + nightly GPU tests)."""
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # Tiny random checkpoint so the CPU tests stay fast.
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Build a deterministic set of pipeline kwargs for the given seed."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
92
"""Recall metric for the `datasets` library, backed by scikit-learn."""
from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""


_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.

Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.

Examples:

    Example 1-A simple example with some errors
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
        >>> print(results)
        {'recall': 0.6666666666666666}
    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
        >>> print(results)
        {'recall': 0.5}
    Example 3-The same example as Example 1, but with `sample_weight` included.
        >>> recall_metric = datasets.load_metric('recall')
        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
        >>> print(results)
        {'recall': 0.55}
    Example 4-A multiclass example, using different averages.
        >>> recall_metric = datasets.load_metric('recall')
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'recall': array([1., 0., 0.])}
"""


_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    """Wraps ``sklearn.metrics.recall_score`` as a `datasets.Metric`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Compute recall; returns a dict with a float (or per-class array when average=None)."""
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
165
0
"""GPT-J model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    """Configuration class for a GPT-J model.

    Defaults match EleutherAI/gpt-j-6B.
    """

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J (optionally with past key values)."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for ONNX export, adding past_key_values when enabled."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
706
"""Fast tokenizer for GPT-NeoX."""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Byte-level BPE tokenizer for GPT-NeoX, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if the requested add_prefix_space differs
        # from the one serialized in tokenizer.json.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files to ``save_directory``; returns the saved file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a conversation into ids, appending EOS after each turn and truncating from the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
554
0
"""Deterministic trial-division primality test with unit tests."""
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime.

    Uses 6k +/- 1 trial division up to sqrt(number).

    Raises:
        AssertionError: if ``number`` is not a non-negative int (kept as an
            assert for backward compatibility with the existing tests).
    """
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Backward-compatible alias for the previous (obfuscated) function name.
lowercase__ = is_prime


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
68
"""Processor class for BLIP: wraps a BlipImageProcessor and a Bert tokenizer."""
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    """Combines image preprocessing and text tokenization into a single callable."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP does not use token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Encode images and/or text; at least one of the two must be provided."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicate while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
594
0
"""Tokenization tests for LayoutLM."""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # NOTE(review): original method name was obfuscated; intentionally a no-op placeholder.
        pass
701
"""Extract a subset of layers from a teacher RoBERTa/GPT-2 checkpoint to seed a distilled student."""
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Copy every other teacher layer (plus the last two) into consecutive student slots.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
252
0
"""Convert a fairseq RobertaPreLayerNorm checkpoint hosted on the Hub to the HF format."""
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Download ``checkpoint_repo``, rename/filter its weights, and save model + tokenizer.

    Args:
        checkpoint_repo: Hub repo id of the original checkpoint
            (e.g. ``andreasmadsen/efficient_mlm_m0.40``).
        pytorch_dump_folder_path: output folder for the converted model.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
56
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Abstract base class that every CLI sub-command implements.

    Previous revision: both abstract methods carried the same obfuscated name
    (the second silently shadowed the first) and the base class `_a` was
    undefined; restored to the conventional two-method interface.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach this command's sub-parser (name, arguments) to *parser*."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
333
0
"""PyTorch-Lightning callbacks for seq2seq fine-tuning: checkpointing,
early stopping, and metric/generation logging.

Fixes over the previous revision: the three module functions were all named
`A` (each def shadowed the previous one), the callback methods were all named
`lowercase`, and the method bodies referenced the undefined placeholder `_a`.
"""
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the single best model by ``val_{metric}``."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,  # maybe save a checkpoint every time val is run, not just end of epoch.
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on ``val_{metric}`` (min for losses, max otherwise)."""
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    """Log learning rates, parameter counts and validation/test results."""

    def on_batch_end(self, trainer, pl_module):
        # Log per-parameter-group learning rates so schedulers are visible.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        """Dump callback metrics (and optionally generations) to text files under output_dir."""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
705
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (fibonacci(2) == 1, fibonacci(3) == 2, ...).

    Non-integer input — or n == 1 — yields 0, preserving the original guard's
    contract. Previous revision crashed with ``isinstance(n, n)`` (arg 2 must
    be a type) and all three functions in this module were named ``A``,
    shadowing each other while call sites used the proper names.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        # Build the sequence iteratively up to index n.
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with *n* digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci term with *n* digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
278
0
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness snake_case = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' snake_case = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' snake_case = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' snake_case = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". 
Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' snake_case = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __UpperCAmelCase ( self : Dict ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=[1, 1_0, 1_0_0] , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : int=3.0 ): """simple docstring""" if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('''This metric is currently not supported on Windows.''' ) with ThreadPoolExecutor(max_workers=__lowerCamelCase ) as executor: _snake_case = [] _snake_case = Counter() _snake_case = 0 _snake_case = defaultdict(__lowerCamelCase ) for task_id, (candidates, test_case) in enumerate(zip(__lowerCamelCase , __lowerCamelCase ) ): for candidate in candidates: _snake_case = candidate + '''\n''' + test_case _snake_case = (test_program, timeout, task_id, completion_id[task_id]) _snake_case = executor.submit(__lowerCamelCase , *__lowerCamelCase ) futures.append(__lowerCamelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__lowerCamelCase ): _snake_case = future.result() results[result["task_id"]].append((result['''completion_id'''], result) ) _snake_case , _snake_case = [], [] for result in results.values(): result.sort() _snake_case = [r[1]['''passed'''] for r in result] total.append(len(__lowerCamelCase ) ) correct.append(sum(__lowerCamelCase ) ) _snake_case = np.array(__lowerCamelCase ) _snake_case = np.array(__lowerCamelCase ) _snake_case = k _snake_case = {f"""pass@{k}""": estimate_pass_at_k(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).mean() for 
k in ks if (total >= k).all()} return pass_at_k, results def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: def estimator(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = itertools.repeat(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) else: assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) _snake_case = iter(lowerCAmelCase_ ) return np.array([estimator(int(lowerCAmelCase_ ) , int(lowerCAmelCase_ ) , lowerCAmelCase_ ) for n, c in zip(lowerCAmelCase_ , lowerCAmelCase_ )] )
103
"""TensorFlow activation functions and the ACT2FN name->callable lookup.

Fixes over the previous revision: every function was named ``UpperCAmelCase_``
(each def shadowed the previous one) while the ``ACT2FN`` table referenced
``gelu``, ``gelu_10``, ``gelu_fast``, ``gelu_new``, ``glu``, ``mish`` and
``quick_gelu`` — all undefined. Names restored.
"""
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Gaussian Error Linear Unit (exact erf formulation).

    See https://arxiv.org/abs/1606.08415
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """Smoother tanh approximation of the GELU.

    Args:
        x: float Tensor to perform activation
    Returns:
        `x` with the GELU activation applied.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Faster tanh-based GELU approximation."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation used by e.g. OpenAI GPT."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10] (for quantization)."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two halves along `axis`, return a * sigmoid(b)."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        # Keras' gelu gained the `approximate` kwarg in TF 2.4.
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up an activation by name; raise KeyError for unknown names."""
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
236
0
"""Lazy package init for the XLM model: exposes config, tokenizer and
(optionally) the PyTorch / TensorFlow model classes.

Fixes over the previous revision: the import structure dict and the optional
model lists were all assigned to the same obfuscated name (each assignment
clobbered the previous one), and the final ``_LazyModule(...)`` referenced an
undefined ``_import_structure`` and was never installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
324
"""Monte Carlo estimators for pi and for the area under a curve.

Fixes over the previous revision: all four functions were named
``__lowerCamelCase`` (each def shadowed the previous one) while two of them
called ``area_under_curve_estimator``, which was never defined.
"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """Estimate pi by throwing random dots at the square [-1, 1] x [-1, 1]."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of *function_to_integrate* over
    [min_value, max_value]: mean sample value times interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Sanity check: estimate the area under y = x and compare to the exact value."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('******************')
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print('******************')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
324
1
# NOTE(review): this block is machine-obfuscated test code for MobileNetV2
# (transformers). It is systematically corrupted: attribute assignments lost
# their targets (e.g. `SCREAMING_SNAKE_CASE : int = batch_size` was presumably
# `self.batch_size = batch_size`) and parameter lists repeat the placeholder
# name `UpperCAmelCase_`, so the block does not parse as-is — TODO confirm
# against upstream tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py
# before any behavioral change. Code below is kept byte-identical; only
# comments were added.
import inspect
import unittest

from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


# Config tester: checks MobileNetV2-specific config attributes exist.
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def _A ( self : List[str] ):
        SCREAMING_SNAKE_CASE : List[str] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(UpperCAmelCase_ , "tf_padding" ) )
        self.parent.assertTrue(hasattr(UpperCAmelCase_ , "depth_multiplier" ) )


# Model tester: builds small configs/inputs and runs shape checks for the
# base model, image classification and semantic segmentation heads.
class SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str]=13 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : int=0.25 , UpperCAmelCase_ : Union[str, Any]=8 , UpperCAmelCase_ : Dict=8 , UpperCAmelCase_ : Optional[int]=6 , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str="relu6" , UpperCAmelCase_ : List[str]=1280 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : Optional[Any]=None , ):
        # NOTE(review): the right-hand names below are the original attribute
        # names; the `self.` targets were stripped by the obfuscation.
        SCREAMING_SNAKE_CASE : Union[str, Any] = parent
        SCREAMING_SNAKE_CASE : int = batch_size
        SCREAMING_SNAKE_CASE : Dict = num_channels
        SCREAMING_SNAKE_CASE : Dict = image_size
        SCREAMING_SNAKE_CASE : int = depth_multiplier
        SCREAMING_SNAKE_CASE : str = depth_divisible_by
        SCREAMING_SNAKE_CASE : Union[str, Any] = min_depth
        SCREAMING_SNAKE_CASE : int = expand_ratio
        SCREAMING_SNAKE_CASE : Tuple = tf_padding
        SCREAMING_SNAKE_CASE : List[str] = output_stride
        SCREAMING_SNAKE_CASE : Optional[int] = first_layer_is_expansion
        SCREAMING_SNAKE_CASE : Any = finegrained_output
        SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
        SCREAMING_SNAKE_CASE : Optional[int] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        SCREAMING_SNAKE_CASE : Any = classifier_dropout_prob
        SCREAMING_SNAKE_CASE : Dict = use_labels
        SCREAMING_SNAKE_CASE : int = is_training
        SCREAMING_SNAKE_CASE : Dict = num_labels
        SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
        SCREAMING_SNAKE_CASE : Union[str, Any] = scope

    def _A ( self : Optional[Any] ):
        # Random pixel inputs plus optional classification / segmentation labels.
        SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        SCREAMING_SNAKE_CASE : Optional[int] = None
        SCREAMING_SNAKE_CASE : List[Any] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
            SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        SCREAMING_SNAKE_CASE : Tuple = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def _A ( self : Optional[int] ):
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def _A ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ):
        # Base model: check last_hidden_state and pooler_output shapes.
        SCREAMING_SNAKE_CASE : int = MobileNetVaModel(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , )
        self.parent.assertEqual(
            result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )

    def _A ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ):
        # Classification head: logits shape (batch, num_labels).
        SCREAMING_SNAKE_CASE : str = self.num_labels
        SCREAMING_SNAKE_CASE : List[Any] = MobileNetVaForImageClassification(UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ):
        # Segmentation head: logits shape (batch, num_labels, H/stride, W/stride),
        # checked both with and without labels.
        SCREAMING_SNAKE_CASE : int = self.num_labels
        SCREAMING_SNAKE_CASE : Dict = MobileNetVaForSemanticSegmentation(UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ )
        self.parent.assertEqual(
            result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , )
        SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
        self.parent.assertEqual(
            result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , )

    def _A ( self : Any ):
        SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
        SCREAMING_SNAKE_CASE : Optional[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict


# Common model tests (bases were ModelTesterMixin, PipelineTesterMixin,
# unittest.TestCase before obfuscation — TODO confirm).
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    UpperCamelCase_ : Any = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    UpperCamelCase_ : List[Any] = (
        {
            '''feature-extraction''': MobileNetVaModel,
            '''image-classification''': MobileNetVaForImageClassification,
            '''image-segmentation''': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    UpperCamelCase_ : Any = False
    UpperCamelCase_ : List[str] = False
    UpperCamelCase_ : int = False
    UpperCamelCase_ : str = False

    def _A ( self : Optional[Any] ):
        SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
        SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )

    def _A ( self : Optional[Any] ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
    def _A ( self : List[Any] ):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
    def _A ( self : Dict ):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions" )
    def _A ( self : Union[str, Any] ):
        pass

    def _A ( self : List[Any] ):
        # forward() should accept pixel_values as its first argument.
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]

            SCREAMING_SNAKE_CASE : Union[str, Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )

    def _A ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase_ )

    def _A ( self : List[Any] ):
        # Hidden states: MobileNetV2 exposes 16 feature maps.
        def check_hidden_states_output(UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ):
            SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase_ )
            model.to(UpperCAmelCase_ )
            model.eval()

            with torch.no_grad():
                SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )

            SCREAMING_SNAKE_CASE : List[Any] = outputs.hidden_states

            SCREAMING_SNAKE_CASE : Any = 16
            self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : str = True
            check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE : List[Any] = True

            check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : Any ):
        SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )

    def _A ( self : Optional[Any] ):
        SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ )

    @slow
    def _A ( self : Optional[Any] ):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : int = MobileNetVaModel.from_pretrained(UpperCAmelCase_ )
            self.assertIsNotNone(UpperCAmelCase_ )


# Fixture: load the standard COCO cats test image.
def lowerCamelCase__ ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


# Integration tests against released checkpoints (exact logits comparison).
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def _A ( self : Optional[int] ):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
        )

    @slow
    def _A ( self : Tuple ):
        SCREAMING_SNAKE_CASE : int = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor
        SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
        SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )

        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : str = model(**UpperCAmelCase_ )

        # verify the logits
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : List[str] = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(UpperCAmelCase_ )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )

    @slow
    def _A ( self : str ):
        SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
        SCREAMING_SNAKE_CASE : int = model.to(UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )

        SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
        SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )

        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : Optional[Any] = model(**UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = outputs.logits

        # verify the logits
        SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : str = torch.tensor(
            [
                [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
                [[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
                [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
            ] , device=UpperCAmelCase_ , )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
62
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
387
0
import unittest import numpy as np def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , ) -> np.ndarray: '''simple docstring''' snake_case_ = np.shape(lowercase_ ) snake_case_ = np.shape(lowercase_ ) snake_case_ = np.shape(lowercase_ ) if shape_a[0] != shape_b[0]: snake_case_ = ( """Expected the same number of rows for A and B. """ f'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(lowercase_ ) if shape_b[1] != shape_c[1]: snake_case_ = ( """Expected the same number of columns for B and C. """ f'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(lowercase_ ) snake_case_ = pseudo_inv if a_inv is None: try: snake_case_ = np.linalg.inv(lowercase_ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class __lowerCamelCase ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> None: snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case_ = np.array([[2, 1], [6, 3]] ) snake_case_ = schur_complement(lowerCamelCase , lowerCamelCase , lowerCamelCase ) snake_case_ = np.block([[a, b], [b.T, c]] ) snake_case_ = np.linalg.det(lowerCamelCase ) snake_case_ = np.linalg.det(lowerCamelCase ) snake_case_ = np.linalg.det(lowerCamelCase ) self.assertAlmostEqual(lowerCamelCase , det_a * det_s ) def lowerCAmelCase_ ( self ) -> None: snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case_ = np.array([[2, 1], [6, 3]] ) with self.assertRaises(lowerCamelCase ): schur_complement(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def lowerCAmelCase_ ( self ) -> None: snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case_ = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(lowerCamelCase ): 
schur_complement(lowerCamelCase , lowerCamelCase , lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
161
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer lowerCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt''' ), '''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''', '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json''' ), '''google/electra-base-generator''': ( '''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json''' ), '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json''' ), }, } lowerCamelCase_ = { 
'''google/electra-small-generator''': 512, '''google/electra-base-generator''': 512, '''google/electra-large-generator''': 512, '''google/electra-small-discriminator''': 512, '''google/electra-base-discriminator''': 512, '''google/electra-large-discriminator''': 512, } lowerCamelCase_ = { '''google/electra-small-generator''': {'''do_lower_case''': True}, '''google/electra-base-generator''': {'''do_lower_case''': True}, '''google/electra-large-generator''': {'''do_lower_case''': True}, '''google/electra-small-discriminator''': {'''do_lower_case''': True}, '''google/electra-base-discriminator''': {'''do_lower_case''': True}, '''google/electra-large-discriminator''': {'''do_lower_case''': True}, } class __lowerCamelCase ( __snake_case ): lowerCamelCase_ : List[Any] = VOCAB_FILES_NAMES lowerCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : int = ElectraTokenizer def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase="[UNK]" , lowerCamelCase="[SEP]" , lowerCamelCase="[PAD]" , lowerCamelCase="[CLS]" , lowerCamelCase="[MASK]" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> Union[str, Any]: super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , **lowerCamelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowerCamelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowerCamelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase ) != tokenize_chinese_chars ): snake_case_ = 
getattr(lowerCamelCase , normalizer_state.pop("""type""" ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**lowerCamelCase ) snake_case_ = do_lower_case def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase=None ) -> Dict: snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase ) return tuple(lowerCamelCase )
161
1
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class a ( UpperCAmelCase__ ): def __init__( self : Optional[int] , lowerCAmelCase : TransformeraDModel , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : KarrasDiffusionSchedulers , lowerCAmelCase : Optional[Dict[int, str]] = None , ) -> List[Any]: '''simple docstring''' super().__init__() self.register_modules(transformer=lowerCAmelCase , vae=lowerCAmelCase , scheduler=lowerCAmelCase ) # create a imagenet -> id dictionary for easier use SCREAMING_SNAKE_CASE_: List[Any] ={} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""",""" ): SCREAMING_SNAKE_CASE_: List[Any] =int(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] =dict(sorted(self.labels.items() ) ) def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, List[str]] ) -> List[int]: '''simple docstring''' if not isinstance(lowerCAmelCase , lowerCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] =list(lowerCAmelCase ) for l in label: if l not in self.labels: raise ValueError( f'''{l} does not exist. 
Please make sure to select one of the following labels: \n {self.labels}.''' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : float = 4.0 , lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase : int = 50 , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Union[str, Any] =len(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[Any] =self.transformer.config.sample_size SCREAMING_SNAKE_CASE_: Optional[int] =self.transformer.config.in_channels SCREAMING_SNAKE_CASE_: Tuple =randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase , device=self.device , dtype=self.transformer.dtype , ) SCREAMING_SNAKE_CASE_: str =torch.cat([latents] * 2 ) if guidance_scale > 1 else latents SCREAMING_SNAKE_CASE_: str =torch.tensor(lowerCAmelCase , device=self.device ).reshape(-1 ) SCREAMING_SNAKE_CASE_: str =torch.tensor([1000] * batch_size , device=self.device ) SCREAMING_SNAKE_CASE_: Dict =torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: SCREAMING_SNAKE_CASE_: str =latent_model_input[: len(lowerCAmelCase ) // 2] SCREAMING_SNAKE_CASE_: Tuple =torch.cat([half, half] , dim=0 ) SCREAMING_SNAKE_CASE_: Tuple =self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] =t if not torch.is_tensor(lowerCAmelCase ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) SCREAMING_SNAKE_CASE_: Optional[int] =latent_model_input.device.type == """mps""" if isinstance(lowerCAmelCase , lowerCAmelCase ): SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.floataa if is_mps else torch.floataa else: SCREAMING_SNAKE_CASE_: Any =torch.intaa if is_mps else torch.intaa SCREAMING_SNAKE_CASE_: Tuple =torch.tensor([timesteps] , dtype=lowerCAmelCase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE_: Dict =timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE_: str =timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output SCREAMING_SNAKE_CASE_: Tuple =self.transformer( lowerCAmelCase , timestep=lowerCAmelCase , class_labels=lowerCAmelCase ).sample # perform guidance if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =torch.split(lowerCAmelCase , len(lowerCAmelCase ) // 2 , dim=0 ) SCREAMING_SNAKE_CASE_: Optional[int] =uncond_eps + guidance_scale * (cond_eps - uncond_eps) SCREAMING_SNAKE_CASE_: str =torch.cat([half_eps, half_eps] , dim=0 ) SCREAMING_SNAKE_CASE_: List[Any] =torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =torch.split(lowerCAmelCase , lowerCAmelCase , dim=1 ) else: SCREAMING_SNAKE_CASE_: List[Any] =noise_pred # compute previous image: x_t -> x_t-1 SCREAMING_SNAKE_CASE_: Tuple =self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample if guidance_scale > 1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =latent_model_input.chunk(2 , dim=0 ) else: SCREAMING_SNAKE_CASE_: str 
=latent_model_input SCREAMING_SNAKE_CASE_: Optional[Any] =1 / self.vae.config.scaling_factor * latents SCREAMING_SNAKE_CASE_: Tuple =self.vae.decode(lowerCAmelCase ).sample SCREAMING_SNAKE_CASE_: Tuple =(samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 SCREAMING_SNAKE_CASE_: Optional[Any] =samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_: Tuple =self.numpy_to_pil(lowerCAmelCase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowerCAmelCase )
409
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class a ( UpperCAmelCase__ ): UpperCamelCase : int = 't5' UpperCamelCase : int = ['past_key_values'] UpperCamelCase : Optional[Any] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : List[str] , lowerCAmelCase : str=3_2128 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : str=64 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : Union[str, Any]=6 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Any=32 , lowerCAmelCase : str=128 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Optional[Any]=1E-6 , lowerCAmelCase : Dict=1.0 , lowerCAmelCase : Optional[Any]="relu" , lowerCAmelCase : List[str]=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Union[str, Any]=0 , lowerCAmelCase : str=1 , **lowerCAmelCase : Union[str, Any] , ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[Any] =vocab_size SCREAMING_SNAKE_CASE_: Optional[int] =d_model SCREAMING_SNAKE_CASE_: int =d_kv SCREAMING_SNAKE_CASE_: Any =d_ff SCREAMING_SNAKE_CASE_: Dict =num_layers SCREAMING_SNAKE_CASE_: Optional[Any] =( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry SCREAMING_SNAKE_CASE_: int =num_heads SCREAMING_SNAKE_CASE_: Optional[Any] =relative_attention_num_buckets SCREAMING_SNAKE_CASE_: List[Any] 
=relative_attention_max_distance SCREAMING_SNAKE_CASE_: List[str] =dropout_rate SCREAMING_SNAKE_CASE_: List[str] =layer_norm_epsilon SCREAMING_SNAKE_CASE_: List[Any] =initializer_factor SCREAMING_SNAKE_CASE_: Tuple =feed_forward_proj SCREAMING_SNAKE_CASE_: Union[str, Any] =use_cache SCREAMING_SNAKE_CASE_: Optional[Any] =self.feed_forward_proj.split("""-""" ) SCREAMING_SNAKE_CASE_: Dict =act_info[-1] SCREAMING_SNAKE_CASE_: Dict =act_info[0] == """gated""" if len(lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """ """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": SCREAMING_SNAKE_CASE_: int ="""gelu_new""" super().__init__( pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase , ) class a ( UpperCAmelCase__ ): @property def lowerCamelCase__ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict ={ """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: SCREAMING_SNAKE_CASE_: Tuple ="""past_encoder_sequence + sequence""" SCREAMING_SNAKE_CASE_: Dict ={0: """batch"""} SCREAMING_SNAKE_CASE_: Optional[Any] ={0: """batch""", 1: """past_decoder_sequence + sequence"""} else: SCREAMING_SNAKE_CASE_: Union[str, Any] ={0: """batch""", 1: """decoder_sequence"""} SCREAMING_SNAKE_CASE_: Union[str, Any] ={0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction="""inputs""" ) return common_inputs @property def lowerCamelCase__ ( self : Any ) -> int: '''simple docstring''' return 13
409
1
'''simple docstring''' from collections import deque from .hash_table import HashTable class _lowerCAmelCase ( __lowerCamelCase ): '''simple docstring''' def __init__( self : Optional[int] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Any ) -> Optional[Any]: '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def __lowercase ( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] ) -> Optional[int]: '''simple docstring''' _lowercase : List[str] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(SCREAMING_SNAKE_CASE_ ) _lowercase : List[Any] = self.values[key] def __lowercase ( self : Any ) -> List[str]: '''simple docstring''' return ( sum(self.charge_factor - len(SCREAMING_SNAKE_CASE_ ) for slot in self.values ) / self.size_table * self.charge_factor ) def __lowercase ( self : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]=None ) -> Tuple: '''simple docstring''' if not ( len(self.values[key] ) == self.charge_factor and self.values.count(SCREAMING_SNAKE_CASE_ ) == 0 ): return key return super()._collision_resolution(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
720
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase__ = 'naver-clova-ix/donut-base' class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowercase ( self : Tuple ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = DonutProcessor.from_pretrained(UpperCamelCase_ ) def __lowercase ( self : Tuple ) -> Tuple: '''simple docstring''' _lowercase : str = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } _lowercase : List[str] = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) _lowercase : str = self.processor.tokenajson(UpperCamelCase_ ) self.assertDictEqual(UpperCamelCase_ , UpperCamelCase_ )
411
0
def UpperCAmelCase_ ( _UpperCAmelCase = 1_0_0 ): lowerCamelCase_: Optional[int] = 0 lowerCamelCase_: Tuple = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(F"{solution() = }")
423
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class a__ : _A = PegasusConfig _A = {} _A = "gelu" def __init__( self : int , A_ : int , A_ : List[str]=13 , A_ : Optional[Any]=7 , A_ : Optional[Any]=True , A_ : Optional[int]=False , A_ : int=99 , A_ : List[Any]=32 , A_ : Optional[int]=2 , A_ : Tuple=4 , A_ : Optional[Any]=37 , A_ : List[Any]=0.1 , A_ : Any=0.1 , A_ : Optional[Any]=40 , A_ : str=2 , A_ : Optional[Any]=1 , A_ : Tuple=0 , ) -> Optional[int]: """simple docstring""" lowerCamelCase_: str = parent lowerCamelCase_: int = batch_size lowerCamelCase_: Any = seq_length lowerCamelCase_: Optional[int] = is_training lowerCamelCase_: Union[str, Any] = use_labels lowerCamelCase_: List[Any] = vocab_size lowerCamelCase_: Dict = hidden_size lowerCamelCase_: str = num_hidden_layers lowerCamelCase_: List[str] = num_attention_heads lowerCamelCase_: List[Any] = intermediate_size lowerCamelCase_: List[Any] = hidden_dropout_prob lowerCamelCase_: Any = attention_probs_dropout_prob lowerCamelCase_: int = max_position_embeddings lowerCamelCase_: int = eos_token_id lowerCamelCase_: Optional[int] = pad_token_id lowerCamelCase_: str = bos_token_id def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" lowerCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCamelCase_: Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) 
lowerCamelCase_: str = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCamelCase_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_: List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase_: List[Any] = prepare_pegasus_inputs_dict(A_ , A_ , A_ ) return config, inputs_dict def lowerCAmelCase ( self : List[str] , A_ : Optional[Any] , A_ : str ) -> List[Any]: """simple docstring""" lowerCamelCase_: Tuple = TFPegasusModel(config=A_ ).get_decoder() lowerCamelCase_: Union[str, Any] = inputs_dict["""input_ids"""] lowerCamelCase_: int = input_ids[:1, :] lowerCamelCase_: List[Any] = inputs_dict["""attention_mask"""][:1, :] lowerCamelCase_: Union[str, Any] = inputs_dict["""head_mask"""] lowerCamelCase_: Tuple = 1 # first forward pass lowerCamelCase_: Optional[int] = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ ) lowerCamelCase_ , lowerCamelCase_: Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase_: Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase_: Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCamelCase_: Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCamelCase_: Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) 
lowerCamelCase_: int = model(A_ , attention_mask=A_ )[0] lowerCamelCase_: Optional[int] = model(A_ , attention_mask=A_ , past_key_values=A_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCamelCase_: Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCamelCase_: Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase_: Optional[int] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A_ , A_ , rtol=1e-3 ) def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ): if attention_mask is None: lowerCamelCase_: Optional[int] = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase_: List[str] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase_: int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase_: List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase_: List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _A = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _A = (TFPegasusForConditionalGeneration,) if 
is_tf_available() else () _A = ( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _A = True _A = False _A = False def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" lowerCamelCase_: Tuple = TFPegasusModelTester(self ) lowerCamelCase_: Optional[int] = ConfigTester(self , config_class=A_ ) def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" lowerCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A_ ) @require_sentencepiece @require_tokenizers @require_tf class a__ ( unittest.TestCase ): _A = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" ", ] _A = [ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers _A = "google/pegasus-xsum" @cached_property def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_: Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : str , **A_ : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_: Union[str, Any] = self.translate_src_text(**A_ ) assert self.expected_text == generated_words def lowerCAmelCase ( self : str , **A_ : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_: List[Any] = self.tokenizer(self.src_text , **A_ , padding=A_ , return_tensors="""tf""" ) lowerCamelCase_: str = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A_ , ) lowerCamelCase_: Tuple = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A_ ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self._assert_generated_batch_equal_expected()
423
1
'''Seed-resize Stable Diffusion pipeline: generates images whose latents are cropped/centered
from a reference latent tensor so that the same seed yields similar images at different sizes.'''
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging

__snake_case = logging.get_logger(__name__)  # pylint: disable=invalid-name


# NOTE(review): identifiers in this class are machine-obfuscated — every local is bound as
# `UpperCamelCase` and every parameter is named `__magic_name__` (duplicate parameter names in a
# `def` are a SyntaxError), yet later statements read the original names (`text_inputs`,
# `batch_size`, `latents`, ...). The original identifiers must be restored before this can run.
# NOTE(review): the base class here is `__snake_case` (the logger above) — presumably the
# original base was `DiffusionPipeline`; confirm against upstream before fixing.
class UpperCAmelCase ( __snake_case ):
    def __init__( self : str , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ):
        """Register the pipeline components (VAE, text encoder, tokenizer, UNet, scheduler,
        safety checker, feature extractor) on the base pipeline."""
        super().__init__()
        self.register_modules(
            vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , )

    def lowerCamelCase_ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention computation on the UNet to trade speed for memory.

        "auto" uses half the attention head dim as the slice size.
        NOTE(review): the comparison reads `slice_size`, which is unbound here (obfuscation).
        """
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            UpperCamelCase = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__magic_name__ )

    def lowerCamelCase_ ( self : str ):
        """Disable attention slicing by re-enabling full attention computation.

        NOTE(review): forwards `__magic_name__`, which is unbound in this method — the
        original presumably passed None; confirm against upstream.
        """
        self.enable_attention_slicing(__magic_name__ )

    @torch.no_grad()
    def __call__( self : Dict , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_0 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , __magic_name__ : Optional[torch.FloatTensor] = None , **__magic_name__ : Optional[int] , ):
        """Run the full text-to-image denoising loop.

        Visible stages: validate inputs; tokenize/encode the prompt; build classifier-free
        guidance embeddings; create (or crop from a reference) the initial latents; run the
        scheduler loop with UNet noise prediction; VAE-decode; safety-check; and return a
        `StableDiffusionPipelineOutput` (or a tuple when `return_dict` is falsy).
        """
        # --- input validation -----------------------------------------------------------
        if isinstance(__magic_name__ , __magic_name__ ):
            UpperCamelCase = 1
        elif isinstance(__magic_name__ , __magic_name__ ):
            UpperCamelCase = len(__magic_name__ )
        else:
            raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}' )

        if height % 8 != 0 or width % 8 != 0:
            # the VAE downsamples by a factor of 8, so sizes must be multiples of 8
            raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(__magic_name__ )}.' )

        # get prompt text embeddings
        UpperCamelCase = self.tokenizer(
            __magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        UpperCamelCase = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # warn and hard-truncate prompts longer than CLIP's context window
            UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
            UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        UpperCamelCase , UpperCamelCase , UpperCamelCase = text_embeddings.shape
        UpperCamelCase = text_embeddings.repeat(1 , __magic_name__ , 1 )
        UpperCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCamelCase = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            # NOTE(review): `4_2` (i.e. 42) looks like an obfuscation placeholder, not a real value
            UpperCamelCase = 4_2
            if negative_prompt is None:
                UpperCamelCase = [""""""]
            elif type(__magic_name__ ) is not type(__magic_name__ ):
                raise TypeError(
                    F'`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='
                    F' {type(__magic_name__ )}.' )
            elif isinstance(__magic_name__ , __magic_name__ ):
                UpperCamelCase = [negative_prompt]
            elif batch_size != len(__magic_name__ ):
                raise ValueError(
                    F'`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'
                    F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    """ the batch size of `prompt`.""" )
            else:
                UpperCamelCase = negative_prompt

            UpperCamelCase = text_input_ids.shape[-1]
            UpperCamelCase = self.tokenizer(
                __magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
            UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            UpperCamelCase = uncond_embeddings.shape[1]
            UpperCamelCase = uncond_embeddings.repeat(__magic_name__ , __magic_name__ , 1 )
            UpperCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        # NOTE(review): two consecutive assignments to the same (obfuscated) name — the
        # second, hard-coded 64x64 shape shadows the height/width-derived one. In the
        # original these were two distinct names (requested shape vs. reference shape).
        UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
        UpperCamelCase = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                # NOTE(review): paired randn calls — originally one per shape (reference
                # latents and requested latents); here both bind the same obfuscated name.
                UpperCamelCase = torch.randn(
                    __magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(self.device )
                UpperCamelCase = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
                    self.device )
            else:
                UpperCamelCase = torch.randn(
                    __magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
                UpperCamelCase = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            UpperCamelCase = latents_reference.to(self.device )
            UpperCamelCase = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        # dx/dy: half the size difference between requested and reference latents;
        # w/h: the overlapping crop window taken out of the reference latents.
        UpperCamelCase = (latents_shape[3] - latents_shape_reference[3]) // 2
        UpperCamelCase = (latents_shape[2] - latents_shape_reference[2]) // 2
        UpperCamelCase = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        UpperCamelCase = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        UpperCamelCase = 0 if dx < 0 else dx
        UpperCamelCase = 0 if dy < 0 else dy
        UpperCamelCase = max(-dx , 0 )
        UpperCamelCase = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        UpperCamelCase = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(__magic_name__ )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        UpperCamelCase = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCamelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase = {}
        if accepts_eta:
            UpperCamelCase = eta

        # --- denoising loop -------------------------------------------------------------
        for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCamelCase = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )

            # predict the noise residual
            UpperCamelCase = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample

            # perform guidance
            if do_classifier_free_guidance:
                UpperCamelCase , UpperCamelCase = noise_pred.chunk(2 )
                UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__magic_name__ , __magic_name__ , __magic_name__ )

        # --- decode + post-process ------------------------------------------------------
        # 0.18215 is the SD VAE latent scaling factor being undone before decoding
        UpperCamelCase = 1 / 0.18_215 * latents
        UpperCamelCase = self.vae.decode(__magic_name__ ).sample

        UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if self.safety_checker is not None:
            UpperCamelCase = self.feature_extractor(self.numpy_to_pil(__magic_name__ ) , return_tensors="""pt""" ).to(
                self.device )
            UpperCamelCase , UpperCamelCase = self.safety_checker(
                images=__magic_name__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            UpperCamelCase = None

        if output_type == "pil":
            UpperCamelCase = self.numpy_to_pil(__magic_name__ )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
709
# Conversion script: original Intel-ISL DPT checkpoints -> HuggingFace DPT format.
# NOTE(review): identifiers are machine-obfuscated — every local is bound as
# `UpperCamelCase` and every parameter as `SCREAMING_SNAKE_CASE_` (duplicate parameter
# names in `read_in_q_k_v`/`convert_dpt_checkpoint` signatures are a SyntaxError), while
# later statements read the original names (`checkpoint_url`, `state_dict`, `name`, ...).
# Restore the original identifiers before running.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)


def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
    """Build a DPTConfig (and the expected output shape) from the checkpoint URL.

    "large" in the URL selects the DPT-Large hyperparameters; "ade" selects the
    ADE20K semantic-segmentation head (150 classes, id2label fetched from the hub).
    """
    UpperCamelCase = DPTConfig()

    if "large" in checkpoint_url:
        UpperCamelCase = 1_024
        UpperCamelCase = 4_096
        UpperCamelCase = 24
        UpperCamelCase = 16
        UpperCamelCase = [5, 11, 17, 23]
        UpperCamelCase = [256, 512, 1_024, 1_024]
        UpperCamelCase = (1, 384, 384)

    if "ade" in checkpoint_url:
        UpperCamelCase = True
        UpperCamelCase = 150
        UpperCamelCase = """huggingface/label-files"""
        UpperCamelCase = """ade20k-id2label.json"""
        UpperCamelCase = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) ) , """r""" ) )
        UpperCamelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
        UpperCamelCase = idalabel
        UpperCamelCase = {v: k for k, v in idalabel.items()}
        UpperCamelCase = [1, 150, 480, 480]

    return config, expected_shape


def _lowercase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
    """Drop the classification head weights that have no HF counterpart."""
    UpperCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )


def _lowercase ( SCREAMING_SNAKE_CASE_ : str ):
    """Map a single original DPT state-dict key to its HuggingFace name.

    Applies a chain of substring replacements: backbone prefixes, attention/MLP
    sublayers, neck ("scratch"/refinenet) layers, readout and resize blocks, and
    the segmentation/depth heads.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        UpperCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        UpperCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        UpperCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
    if "pos_embed" in name:
        UpperCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        UpperCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        UpperCamelCase = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        UpperCamelCase = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        UpperCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        UpperCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name:
        UpperCamelCase = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        UpperCamelCase = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        UpperCamelCase = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        UpperCamelCase = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        UpperCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        UpperCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        UpperCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        UpperCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        UpperCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        UpperCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
    if "out_conv" in name:
        UpperCamelCase = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        UpperCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        UpperCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        UpperCamelCase = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        UpperCamelCase = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        UpperCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        UpperCamelCase = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        UpperCamelCase = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        UpperCamelCase = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        UpperCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        UpperCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    return name


def _lowercase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple ):
    """Split each fused timm qkv projection into separate q/k/v weights and biases."""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        UpperCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
        UpperCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        UpperCamelCase = in_proj_weight[: config.hidden_size, :]
        UpperCamelCase = in_proj_bias[: config.hidden_size]
        UpperCamelCase = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        UpperCamelCase = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        UpperCamelCase = in_proj_weight[
            -config.hidden_size :, :
        ]
        UpperCamelCase = in_proj_bias[-config.hidden_size :]


def _lowercase ( ):
    """Download the standard COCO cats test image used for output verification."""
    UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    UpperCamelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
    return im


@torch.no_grad()
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ):
    """End-to-end conversion: download the checkpoint, rename/split weights, load into
    the HF model, verify logits on a test image, save locally and optionally push to hub."""
    UpperCamelCase , UpperCamelCase = get_dpt_config(SCREAMING_SNAKE_CASE_ )
    # load original state_dict from URL
    UpperCamelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
    # rename keys
    for key in state_dict.copy().keys():
        UpperCamelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase = val
    # read in qkv matrices
    read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # load HuggingFace model
    UpperCamelCase = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    model.eval()
    # Check outputs on an image
    UpperCamelCase = 480 if """ade""" in checkpoint_url else 384
    UpperCamelCase = DPTImageProcessor(size=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase = prepare_img()
    UpperCamelCase = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
    # forward pass
    UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ ).logits if """ade""" in checkpoint_url else model(**SCREAMING_SNAKE_CASE_ ).predicted_depth
    # Assert logits
    UpperCamelCase = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        UpperCamelCase = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(SCREAMING_SNAKE_CASE_ )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , SCREAMING_SNAKE_CASE_ )
    )
    Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(SCREAMING_SNAKE_CASE_ )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )

    if push_to_hub:
        print("""Pushing model to hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )


if __name__ == "__main__":
    __snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    # NOTE(review): the parser/args are bound as `__snake_case` but read as `parser`/`args`
    # below (obfuscation) — restore the original names before running.
    __snake_case = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
181
0
'''Lazy package init for TrOCR: exposes configuration/processing always, and the
torch modeling classes only when torch is installed.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Map of submodule -> public names, consumed by _LazyModule below.
# NOTE(review): the assignments here are machine-obfuscated to `__lowercase`, but the
# _LazyModule(...) call at the bottom reads `_import_structure` — as written this
# module raises NameError on import; the original name must be restored.
__lowercase : int = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

# Register the torch-only modeling symbols when torch is available; otherwise the
# OptionalDependencyNotAvailable path silently leaves them out of the lazy structure.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : Optional[Any] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime everything goes
    # through the _LazyModule proxy below.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules on attribute access.
    __lowercase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
422
from __future__ import annotations


def lowerCamelCase__ (__lowerCamelCase):  # This function is recursive
    """Return a longest non-decreasing subsequence of ``__lowerCamelCase``.

    Divide-and-conquer: compare the best subsequence that keeps the head element
    (followed by every later element >= the head) against the best subsequence
    that starts at the first element smaller than the head. Ties are resolved in
    favour of the "drop the head" branch, matching the original behaviour.

    BUGFIX: the previous version bound every local to the obfuscated name
    ``_SCREAMING_SNAKE_CASE`` while reading ``array_length``/``pivot``/``is_found``
    etc., so it raised NameError on any call; the intended bindings are restored.

    >>> lowerCamelCase__([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    >>> lowerCamelCase__([])
    []
    >>> lowerCamelCase__([3, 2, 1])
    [2]
    """
    array = __lowerCamelCase
    array_length = len(array)
    # A list of zero or one elements is trivially its own longest subsequence
    # (recursion stop condition).
    if array_length <= 1:
        return array

    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    # Find the first element smaller than the pivot and recurse on the tail that
    # could extend a subsequence starting there.
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            candidate = [element for element in array[i:] if element >= array[i]]
            candidate = lowerCamelCase__(candidate)
            if len(candidate) > len(longest_subseq):
                longest_subseq = candidate
        else:
            i += 1

    # Alternative: keep the pivot and recurse on every later element >= pivot.
    tail = [element for element in array[1:] if element >= pivot]
    with_pivot = [pivot, *lowerCamelCase__(tail)]
    if len(with_pivot) > len(longest_subseq):
        return with_pivot
    return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
249
0
# Conversion script: fairseq wav2vec2-conformer checkpoints -> HuggingFace format.
# NOTE(review): identifiers are machine-obfuscated — every local is bound as
# `lowerCamelCase__` and every parameter as `__lowerCAmelCase` (duplicate parameter
# names in the `def` signatures are a SyntaxError), while later statements read the
# original names (`key`, `hf_pointer`, `fairseq_model`, ...). Restore the original
# identifiers before running.
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    WavaVecaConformerConfig,
    WavaVecaConformerForCTC,
    WavaVecaConformerForPreTraining,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)

logging.set_verbosity_info()
a =logging.get_logger(__name__)

# fairseq parameter name fragment -> HF module path ('*' is filled with the layer index).
a ={
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
    'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
    'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
    'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
    'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
    'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
    'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
    'self_attn.rotary_emb': 'encoder.embed_positions',
    'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
    'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
    'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
    'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
    'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
    'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
    'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
    'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
    'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
    'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
    'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
    'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# HF names that live at the model's top level (no "wav2vec2_conformer." prefix).
a =[
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]


def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
    '''Walk `key` through the HF model's attributes, shape-check, and copy `value`
    into the attribute selected by `weight_type`.'''
    for attribute in key.split("." ):
        lowerCamelCase__ =getattr(lowerCAmelCase__ , lowerCAmelCase__ )

    if weight_type is not None:
        lowerCamelCase__ =getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
    else:
        lowerCamelCase__ =hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )

    # All branches assign `value` — in the un-obfuscated original each branch
    # targeted a different attribute (weight / weight_g / weight_v / bias / ...).
    if weight_type == "weight":
        lowerCamelCase__ =value
    elif weight_type == "weight_g":
        lowerCamelCase__ =value
    elif weight_type == "weight_v":
        lowerCamelCase__ =value
    elif weight_type == "bias":
        lowerCamelCase__ =value
    elif weight_type == "running_mean":
        lowerCamelCase__ =value
    elif weight_type == "running_var":
        lowerCamelCase__ =value
    elif weight_type == "num_batches_tracked":
        lowerCamelCase__ =value
    elif weight_type == "inv_freq":
        lowerCamelCase__ =value
    else:
        lowerCamelCase__ =value

    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )


def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
    '''Copy every fairseq parameter into the HF model: conv feature-extractor layers
    go through `load_conv_layer`, everything else through the MAPPING table, and
    weights that match nothing are collected and logged as unused.'''
    lowerCamelCase__ =[]
    lowerCamelCase__ =fairseq_model.state_dict()

    lowerCamelCase__ =hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        lowerCamelCase__ =False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == "group" , )
            lowerCamelCase__ =True
        else:
            for key, mapped_key in MAPPING.items():
                lowerCamelCase__ ="wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key

                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    lowerCamelCase__ =True
                    if "*" in mapped_key:
                        # layer index sits just before the matched fragment in the fairseq name
                        lowerCamelCase__ =name.split(lowerCAmelCase__ )[0].split("." )[-2]
                        lowerCamelCase__ =mapped_key.replace("*" , lowerCAmelCase__ )
                    if "pos_bias_u" in name:
                        lowerCamelCase__ =None
                    elif "pos_bias_v" in name:
                        lowerCamelCase__ =None
                    elif "weight_g" in name:
                        lowerCamelCase__ ="weight_g"
                    elif "weight_v" in name:
                        lowerCamelCase__ ="weight_v"
                    elif "bias" in name:
                        lowerCamelCase__ ="bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        lowerCamelCase__ ="weight"
                    elif "running_mean" in name:
                        lowerCamelCase__ ="running_mean"
                    elif "inv_freq" in name:
                        lowerCamelCase__ ="inv_freq"
                    elif "running_var" in name:
                        lowerCamelCase__ ="running_var"
                    elif "num_batches_tracked" in name:
                        lowerCamelCase__ ="num_batches_tracked"
                    else:
                        lowerCamelCase__ =None
                    set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
                continue
        if not is_used:
            unused_weights.append(lowerCAmelCase__ )

    logger.warning(F'''Unused weights: {unused_weights}''' )


def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
    '''Load one conv feature-extractor weight/bias into the HF feature extractor,
    dispatching on the fairseq layer type id (0 = conv, 2 = layer norm).'''
    lowerCamelCase__ =full_name.split("conv_layers." )[-1]
    lowerCamelCase__ =name.split("." )
    lowerCamelCase__ =int(items[0] )
    lowerCamelCase__ =int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            lowerCamelCase__ =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            lowerCamelCase__ =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            lowerCamelCase__ =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            lowerCamelCase__ =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(lowerCAmelCase__ )


@torch.no_grad()
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True ) -> str:
    '''End-to-end conversion: build the HF config (CTC head + tokenizer/processor for
    fine-tuned checkpoints, pretraining head otherwise), load the fairseq model,
    copy weights across, and save the HF model.'''
    if config_path is not None:
        lowerCamelCase__ =WavaVecaConformerConfig.from_pretrained(lowerCAmelCase__ , hidden_act="swish" )
    else:
        lowerCamelCase__ =WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        lowerCamelCase__ ="rotary"

    if is_finetuned:
        if dict_path:
            lowerCamelCase__ =Dictionary.load(lowerCAmelCase__ )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            lowerCamelCase__ =target_dict.pad_index
            lowerCamelCase__ =target_dict.bos_index
            lowerCamelCase__ =target_dict.eos_index
            lowerCamelCase__ =len(target_dict.symbols )
            lowerCamelCase__ =os.path.join(lowerCAmelCase__ , "vocab.json" )
            if not os.path.isdir(lowerCAmelCase__ ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase__ ) )
                return
            os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
            lowerCamelCase__ =target_dict.indices

            # fairseq has the <pad> and <s> switched
            lowerCamelCase__ =0
            lowerCamelCase__ =1
            with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
            lowerCamelCase__ =WavaVecaCTCTokenizer(
                lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCAmelCase__ , )
            lowerCamelCase__ =True if config.feat_extract_norm == "layer" else False
            lowerCamelCase__ =WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
            lowerCamelCase__ =WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
            processor.save_pretrained(lowerCAmelCase__ )

        lowerCamelCase__ =WavaVecaConformerForCTC(lowerCAmelCase__ )
    else:
        lowerCamelCase__ =WavaVecaConformerForPreTraining(lowerCAmelCase__ )

    if is_finetuned:
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        lowerCamelCase__ =argparse.Namespace(task="audio_pretraining" )
        lowerCamelCase__ =fairseq.tasks.setup_task(lowerCAmelCase__ )
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase__ )

    lowerCamelCase__ =model[0].eval()

    recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , not is_finetuned )

    hf_wavavec.save_pretrained(lowerCAmelCase__ )


if __name__ == "__main__":
    a =argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    a =parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
714
"""Pipeline tests for audio classification (PT + TF mappings).

NOTE(review): identifiers are machine-obfuscated — every local is bound as
`lowerCamelCase__` and every method is named `_a` (later `def _a` statements shadow
earlier ones on the class), while bodies read the original names (`audio_classifier`,
`examples`, `EXPECTED_OUTPUT`, ...). Restore the original identifiers before running.
"""
import unittest

import numpy as np

from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_torchaudio,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class __UpperCAmelCase ( unittest.TestCase ):
    # model mappings consumed by the common pipeline-test harness
    A__ : List[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    A__ : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """Build an AudioClassificationPipeline plus two zero-filled raw waveforms
        to feed the shared pipeline test."""
        lowerCamelCase__ =AudioClassificationPipeline(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )

        # test with a raw waveform
        lowerCamelCase__ =np.zeros((34000,) )
        lowerCamelCase__ =np.zeros((14000,) )
        return audio_classifier, [audioa, audio]

    def _a ( self , _lowerCamelCase , _lowerCamelCase ):
        """Check output structure (list of {score, label} dicts) for default and
        top_k=1 calls, then run the torchaudio variant."""
        lowerCamelCase__ , lowerCamelCase__ =examples
        lowerCamelCase__ =audio_classifier(_lowerCamelCase )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            _lowerCamelCase , [
                {"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
                {"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
            ] , )
        lowerCamelCase__ =audio_classifier(_lowerCamelCase , top_k=1 )
        self.assertEqual(
            _lowerCamelCase , [
                {"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
            ] , )
        self.run_torchaudio(_lowerCamelCase )

    @require_torchaudio
    def _a ( self , _lowerCamelCase ):
        """Same structural check but on a real sample from the dummy LibriSpeech dataset."""
        import datasets

        # test with a local file
        lowerCamelCase__ =datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        lowerCamelCase__ =dataset[0]["audio"]["array"]
        lowerCamelCase__ =audio_classifier(_lowerCamelCase )
        self.assertEqual(
            _lowerCamelCase , [
                {"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
                {"score": ANY(_lowerCamelCase ), "label": ANY(_lowerCamelCase )},
            ] , )

    @require_torch
    def _a ( self ):
        """Small-model smoke test: top-4 predictions on a ones-waveform must match one
        of two known score tables (accounts for a PyTorch-version dependent output)."""
        lowerCamelCase__ ="anton-l/wav2vec2-random-tiny-classifier"

        lowerCamelCase__ =pipeline("audio-classification" , model=_lowerCamelCase )

        lowerCamelCase__ =np.ones((8000,) )
        lowerCamelCase__ =audio_classifier(_lowerCamelCase , top_k=4 )

        lowerCamelCase__ =[
            {"score": 0.0_8_4_2, "label": "no"},
            {"score": 0.0_8_3_8, "label": "up"},
            {"score": 0.0_8_3_7, "label": "go"},
            {"score": 0.0_8_3_4, "label": "right"},
        ]
        lowerCamelCase__ =[
            {"score": 0.0_8_4_5, "label": "stop"},
            {"score": 0.0_8_4_4, "label": "on"},
            {"score": 0.0_8_4_1, "label": "right"},
            {"score": 0.0_8_3_4, "label": "left"},
        ]
        self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )

        # dict input form: {"array": ..., "sampling_rate": ...}
        lowerCamelCase__ ={"array": np.ones((8000,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        lowerCamelCase__ =audio_classifier(_lowerCamelCase , top_k=4 )
        self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )

    @require_torch
    @slow
    def _a ( self ):
        """Full-model regression test against superb keyword-spotting scores."""
        import datasets

        lowerCamelCase__ ="superb/wav2vec2-base-superb-ks"

        lowerCamelCase__ =pipeline("audio-classification" , model=_lowerCamelCase )
        lowerCamelCase__ =datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )

        lowerCamelCase__ =np.array(dataset[3]["speech"] , dtype=np.floataa )
        lowerCamelCase__ =audio_classifier(_lowerCamelCase , top_k=4 )
        self.assertEqual(
            nested_simplify(_lowerCamelCase , decimals=3 ) , [
                {"score": 0.9_8_1, "label": "go"},
                {"score": 0.0_0_7, "label": "up"},
                {"score": 0.0_0_6, "label": "_unknown_"},
                {"score": 0.0_0_1, "label": "down"},
            ] , )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF" )
    def _a ( self ):
        pass
132
0
'''simple docstring''' from functools import reduce __snake_case = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def a ( __a = N ) -> int: '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda __a , __a : str(int(__a ) * int(__a ) ) , n[i : i + 13] ) ) for i in range(len(__a ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
189
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class lowercase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = inspect.getfile(accelerate.test_utils ) UpperCamelCase__ :Any = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 UpperCamelCase__ :List[str] = test_metrics @require_cpu def lowerCAmelCase__ ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def lowerCAmelCase__ ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def lowerCAmelCase__ ( self ): '''simple docstring''' self.test_metrics.main() @require_multi_gpu def lowerCAmelCase__ ( self ): '''simple docstring''' print(F'''Found {torch.cuda.device_count()} devices.''' ) UpperCamelCase__ :Optional[Any] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
189
1
import requests from bsa import BeautifulSoup def lowercase_ ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowercase = BeautifulSoup(requests.get(_UpperCamelCase , params=_UpperCamelCase ).content , '''html.parser''' ) __lowercase = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) __lowercase = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": a : Tuple = { '''title''': ( '''Precisely geometry controlled microsupercapacitors for ultrahigh areal ''' '''capacitance, volumetric capacitance, and energy density''' ), '''journal''': '''Chem. Mater.''', '''volume''': 30, '''pages''': '''3979-3990''', '''year''': 2018, '''hl''': '''en''', } print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
527
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class lowerCamelCase_ ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
527
1
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __lowerCamelCase = logging.get_logger(__name__) class __A ( SCREAMING_SNAKE_CASE_ ): UpperCAmelCase__ = ["input_features"] def __init__( self : str , __snake_case : List[Any]=8_0 , __snake_case : Dict=1_6_0_0_0 , __snake_case : Union[str, Any]=1_6_0 , __snake_case : Optional[int]=3_0 , __snake_case : List[str]=4_0_0 , __snake_case : Any=0.0 , __snake_case : int=False , **__snake_case : Optional[Any] , ) -> Optional[Any]: super().__init__( feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , return_attention_mask=__snake_case , **__snake_case , ) __magic_name__: List[str] = n_fft __magic_name__: Dict = hop_length __magic_name__: Optional[Any] = chunk_length __magic_name__: List[Any] = chunk_length * sampling_rate __magic_name__: List[Any] = self.n_samples // hop_length __magic_name__: List[str] = sampling_rate __magic_name__: str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__snake_case , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__snake_case , norm="""slaney""" , mel_scale="""slaney""" , ) def lowerCamelCase__ ( self : str , __snake_case : np.array ) -> np.ndarray: __magic_name__: Tuple = spectrogram( __snake_case , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , ) __magic_name__: Optional[Any] = log_spec[:, :-1] __magic_name__: int = np.maximum(__snake_case , log_spec.max() - 8.0 ) __magic_name__: Union[str, Any] = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from 
transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCamelCase__ ( __snake_case : List[np.ndarray] , __snake_case : List[np.ndarray] , __snake_case : float = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: __magic_name__: str = np.array(__snake_case , np.intaa ) __magic_name__: Dict = [] for vector, length in zip(__snake_case , attention_mask.sum(-1 ) ): __magic_name__: Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: __magic_name__: Dict = padding_value normed_input_values.append(__snake_case ) else: __magic_name__: List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Union[str, Any] , __snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __snake_case : bool = True , __snake_case : Optional[int] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[str] = "max_length" , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , **__snake_case : Optional[Any] , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' F' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) __magic_name__: Dict = isinstance(__snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) __magic_name__: Optional[Any] = is_batched_numpy or ( isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __magic_name__: List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__snake_case , np.ndarray ): __magic_name__: List[str] = np.asarray(__snake_case , dtype=np.floataa ) elif isinstance(__snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __magic_name__: str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __magic_name__: str = [np.asarray([raw_speech] ).T] __magic_name__: str = BatchFeature({"""input_features""": raw_speech} ) # convert into correct format for padding __magic_name__: Optional[Any] = self.pad( __snake_case , padding=__snake_case , max_length=max_length if max_length else self.n_samples , truncation=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: __magic_name__: List[Any] = self.zero_mean_unit_var_norm( padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , ) __magic_name__: List[Any] = np.stack(padded_inputs["""input_features"""] , axis=0 ) # make sure list is in array format __magic_name__: int = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 ) __magic_name__: Union[str, Any] = [self._np_extract_fbank_features(__snake_case ) for waveform in input_features[0]] if isinstance(input_features[0] , __snake_case ): __magic_name__: Dict = 
[np.asarray(__snake_case , dtype=np.floataa ) for feature in input_features] else: __magic_name__: List[str] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) __magic_name__: Optional[Any] = padded_inputs["""attention_mask"""][:, :: self.hop_length] if return_tensors is not None: __magic_name__: Optional[int] = padded_inputs.convert_to_tensors(__snake_case ) return padded_inputs def lowerCamelCase__ ( self : List[str] ) -> Dict[str, Any]: __magic_name__: Tuple = copy.deepcopy(self.__dict__ ) __magic_name__: Union[str, Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
96
UpperCAmelCase_ : Tuple = 0 # The first color of the flag. UpperCAmelCase_ : Any = 1 # The second color of the flag. UpperCAmelCase_ : str = 2 # The third color of the flag. UpperCAmelCase_ : Tuple = (red, white, blue) def lowerCAmelCase_ ( lowerCamelCase ): if not sequence: return [] if len(lowerCamelCase ) == 1: return list(lowerCamelCase ) __magic_name__ : int =0 __magic_name__ : str =len(lowerCamelCase ) - 1 __magic_name__ : Optional[Any] =0 while mid <= high: if sequence[mid] == colors[0]: __magic_name__ , __magic_name__ : Tuple =sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: __magic_name__ , __magic_name__ : Optional[Any] =sequence[high], sequence[mid] high -= 1 else: __magic_name__ : Optional[int] =F"The elements inside the sequence must contains only {colors} values" raise ValueError(lowerCamelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ : List[Any] = input("Enter numbers separated by commas:\n").strip() UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(",")] print(F"""{dutch_national_flag_sort(unsorted)}""")
21
0
from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json" ), } class _snake_case ( _UpperCAmelCase): UpperCamelCase__ : Optional[Any] ="""xlm-prophetnet""" UpperCamelCase__ : Optional[Any] =["""past_key_values"""] UpperCamelCase__ : Optional[Any] ={ """num_attention_heads""": """num_encoder_attention_heads""", } def __init__( self : List[Any], __lowercase : Optional[float] = 0.1, __lowercase : Optional[Union[str, Callable]] = "gelu", __lowercase : Optional[int] = 3_0522, __lowercase : Optional[int] = 1024, __lowercase : Optional[int] = 4096, __lowercase : Optional[int] = 12, __lowercase : Optional[int] = 16, __lowercase : Optional[int] = 4096, __lowercase : Optional[int] = 12, __lowercase : Optional[int] = 16, __lowercase : Optional[float] = 0.1, __lowercase : Optional[float] = 0.1, __lowercase : Optional[int] = 512, __lowercase : Optional[float] = 0.02, __lowercase : Optional[bool] = True, __lowercase : Optional[bool] = True, __lowercase : Optional[int] = 0, __lowercase : Optional[int] = 2, __lowercase : Optional[int] = 32, __lowercase : Optional[int] = 128, __lowercase : Optional[bool] = False, __lowercase : Optional[float] = 0.0, __lowercase : Optional[bool] = True, __lowercase : Optional[int] = 0, __lowercase : Optional[int] = 1, __lowercase : Optional[int] = 2, **__lowercase : List[Any], ): lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = encoder_ffn_dim lowercase__ = num_encoder_layers lowercase__ = num_encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = num_decoder_layers lowercase__ = num_decoder_attention_heads lowercase__ = max_position_embeddings lowercase__ = init_std # Normal(0, this parameter) lowercase__ = activation_function # parameters for xlmprophetnet 
lowercase__ = ngram lowercase__ = num_buckets lowercase__ = relative_max_distance lowercase__ = disable_ngram_loss lowercase__ = eps # 3 Types of Dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = dropout lowercase__ = use_cache super().__init__( pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, is_encoder_decoder=lowerCamelCase_, add_cross_attention=lowerCamelCase_, decoder_start_token_id=lowerCamelCase_, **lowerCamelCase_, ) @property def A__ ( self : List[Any] ): return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def A__ ( self : Union[str, Any], __lowercase : Optional[Any] ): raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." )
718
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
0
'''simple docstring''' import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowercase__ ( ): '''simple docstring''' __lowercase = ArgumentParser( description=( """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=__UpperCamelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=__UpperCamelCase , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=__UpperCamelCase ) return parser.parse_args() def lowercase__ ( ): '''simple docstring''' __lowercase = parse_args() # Import training_script as a module. __lowercase = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) __lowercase = script_fpath.stem __lowercase = importlib.import_module(__UpperCamelCase ) # Patch sys.argv __lowercase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
566
'''simple docstring''' from __future__ import annotations def lowercase__ ( __UpperCamelCase : list[int] ): '''simple docstring''' if len(__UpperCamelCase ) == 0: return array __lowercase , __lowercase = min(__UpperCamelCase ), max(__UpperCamelCase ) # Compute the variables __lowercase = _max - _min + 1 __lowercase , __lowercase = [0] * holes_range, [0] * holes_range # Make the sorting. for i in array: __lowercase = i - _min __lowercase = i holes_repeat[index] += 1 # Makes the array back by replacing the numbers. __lowercase = 0 for i in range(__UpperCamelCase ): while holes_repeat[i] > 0: __lowercase = holes[i] index += 1 holes_repeat[i] -= 1 # Returns the sorted array. return array if __name__ == "__main__": import doctest doctest.testmod() snake_case : Dict = input('Enter numbers separated by comma:\n') snake_case : List[str] = [int(x) for x in user_input.split(',')] print(pigeon_sort(unsorted))
566
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _UpperCamelCase = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
"""simple docstring""" from __future__ import annotations def _a ( _snake_case ): """simple docstring""" return len(set(_snake_case ) ) == len(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
74
1
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _snake_case ( _lowercase ): def __init__( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: List[Any] ) -> str: __UpperCAmelCase : List[Any] = params __UpperCAmelCase : Optional[Any] = np.array(UpperCAmelCase__ ) __UpperCAmelCase : List[Any] = np.array([len(UpperCAmelCase__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self: int , __lowerCamelCase: Any ) -> str: return (self.token_ids[index], self.lengths[index]) def __len__( self: str ) -> List[str]: return len(self.lengths ) def _lowerCamelCase ( self: Dict ) -> Dict: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _lowerCamelCase ( self: Any ) -> Optional[Any]: __UpperCAmelCase : int = self.params.max_model_input_size __UpperCAmelCase : Tuple = self.lengths > max_len logger.info(f'''Splitting {sum(UpperCAmelCase__ )} too long sequences.''' ) def divide_chunks(__lowerCamelCase: Any , __lowerCamelCase: str ): return [l[i : i + n] for i in range(0 , len(UpperCAmelCase__ ) , UpperCAmelCase__ )] __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : List[Any] = [] if self.params.mlm: __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __UpperCAmelCase : Any = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __UpperCAmelCase : Dict = 
np.insert(UpperCAmelCase__ , 0 , UpperCAmelCase__ ) if sub_s[-1] != sep_id: __UpperCAmelCase : Tuple = np.insert(UpperCAmelCase__ , len(UpperCAmelCase__ ) , UpperCAmelCase__ ) assert len(UpperCAmelCase__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(UpperCAmelCase__ ) new_tok_ids.extend(UpperCAmelCase__ ) new_lengths.extend([len(UpperCAmelCase__ ) for l in sub_seqs] ) __UpperCAmelCase : List[str] = np.array(UpperCAmelCase__ ) __UpperCAmelCase : int = np.array(UpperCAmelCase__ ) def _lowerCamelCase ( self: int ) -> str: __UpperCAmelCase : Optional[Any] = len(self ) __UpperCAmelCase : str = self.lengths > 11 __UpperCAmelCase : Any = self.token_ids[indices] __UpperCAmelCase : Union[str, Any] = self.lengths[indices] __UpperCAmelCase : List[str] = len(self ) logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]: if "unk_token" not in self.params.special_tok_ids: return else: __UpperCAmelCase : List[Any] = self.params.special_tok_ids["unk_token"] __UpperCAmelCase : List[str] = len(self ) __UpperCAmelCase : List[Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __UpperCAmelCase : Union[str, Any] = (unk_occs / self.lengths) < 0.5 __UpperCAmelCase : str = self.token_ids[indices] __UpperCAmelCase : int = self.lengths[indices] __UpperCAmelCase : Any = len(self ) logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def _lowerCamelCase ( self: Dict ) -> Union[str, Any]: if not self.params.is_master: return logger.info(f'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown 
tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def _lowerCamelCase ( self: Any , __lowerCamelCase: Union[str, Any] ) -> Optional[int]: __UpperCAmelCase : Optional[Any] = [t[0] for t in batch] __UpperCAmelCase : List[Any] = [t[1] for t in batch] assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) # Max for paddings __UpperCAmelCase : List[Any] = max(UpperCAmelCase__ ) # Pad token ids if self.params.mlm: __UpperCAmelCase : List[str] = self.params.special_tok_ids["pad_token"] else: __UpperCAmelCase : int = self.params.special_tok_ids["unk_token"] __UpperCAmelCase : int = [list(t.astype(UpperCAmelCase__ ) ) + [pad_idx] * (max_seq_len_ - len(UpperCAmelCase__ )) for t in token_ids] assert len(tk_ ) == len(UpperCAmelCase__ ) assert all(len(UpperCAmelCase__ ) == max_seq_len_ for t in tk_ ) __UpperCAmelCase : List[Any] = torch.tensor(tk_ ) # (bs, max_seq_len_) __UpperCAmelCase : Tuple = torch.tensor(UpperCAmelCase__ ) # (bs) return tk_t, lg_t
382
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "SCUT-DLVCLab/lilt-roberta-en-base": ( "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" ), } class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = "lilt" def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]: super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = classifier_dropout a_ = channel_shrink_ratio a_ = max_ad_position_embeddings
697
0
import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class lowerCamelCase__ ( __lowercase): '''simple docstring''' _A = ComputeEnvironment.AMAZON_SAGEMAKER _A = True _A = 'ml.p3.2xlarge' _A = 'accelerate_sagemaker_execution_role' _A = 'hf-sm' _A = 'us-east-1' _A = 1 _A = 'accelerate-sagemaker-1' _A = '1.6' _A = '4.4' _A = 'train.py' _A = [ '--model_name_or_path', 'bert', '--do_train', 'False', '--epochs', '3', '--learning_rate', '5e-5', '--max_steps', '50.5', ] _A = [ '--model_name_or_path', 'bert', '--do_train', '--do_test', 'False', '--do_predict', '--epochs', '3', '--learning_rate', '5e-5', '--max_steps', '50.5', ] class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self :Tuple ) -> str: # If no defaults are changed, `to_kwargs` returns an empty dict. __UpperCamelCase : str = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["model_name_or_path"] , a ) assert isinstance(converted_args["do_train"] , a ) assert isinstance(converted_args["epochs"] , a ) assert isinstance(converted_args["learning_rate"] , a ) assert isinstance(converted_args["max_steps"] , a ) with pytest.raises(a ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
94
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline lowercase : Any = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False) parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not') parser.add_argument('--steps', default=None, type=int, help='Num inference steps') lowercase : Tuple = parser.parse_args() lowercase : str = 'cpu' lowercase : List[Any] = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings' lowercase : Any = 'path-to-your-trained-model' lowercase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) lowercase : Dict = pipe.to(device) # to channels last lowercase : int = pipe.unet.to(memory_format=torch.channels_last) lowercase : int = pipe.vae.to(memory_format=torch.channels_last) lowercase : Dict = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: lowercase : List[str] = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex lowercase : Any = torch.randn(2, 4, 64, 64) lowercase : Union[str, Any] = torch.rand(1) * 999 lowercase : List[Any] = torch.randn(2, 77, 768) lowercase : List[Any] = (sample, timestep, encoder_hidden_status) try: lowercase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: lowercase : str = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) lowercase : Dict = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: lowercase : Dict = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute lowercase : Any = 
666 lowercase : int = torch.Generator(device).manual_seed(seed) lowercase : List[str] = {'generator': generator} if args.steps is not None: lowercase : List[str] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): lowercase : Dict = pipe(prompt, **generate_kwargs).images[0] # save image image.save('generated.png')
94
1
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class _a ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase( self ): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights __A : List[str] = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__lowercase , cache_dir=__lowercase ) __A : Optional[int] = [t[-1] for t in os.walk(os.path.join(__lowercase , os.listdir(__lowercase )[0] , "snapshots" ) )] __A : List[Any] = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class _a ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase( self ): __A : List[str] = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__lowercase ) __A : Optional[int] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) __A : Any = jax.random.PRNGKey(0 ) __A : Union[str, Any] = 4 __A : Union[str, Any] = jax.device_count() __A : List[str] = num_samples * [prompt] __A : List[Any] = pipeline.prepare_inputs(__lowercase ) # shard inputs and rng __A : str = replicate(__lowercase ) __A : Any = jax.random.split(__lowercase , __lowercase ) __A : int = shard(__lowercase ) __A : Tuple = 
pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3 assert np.abs(np.abs(__lowercase , dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1 __A : Tuple = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(__lowercase ) == num_samples def __UpperCAmelCase( self ): __A : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=__lowercase ) __A : str = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) __A : List[str] = jax.random.PRNGKey(0 ) __A : Dict = 50 __A : Optional[Any] = jax.device_count() __A : Any = num_samples * [prompt] __A : Any = pipeline.prepare_inputs(__lowercase ) # shard inputs and rng __A : Dict = replicate(__lowercase ) __A : Optional[Any] = jax.random.split(__lowercase , __lowercase ) __A : str = shard(__lowercase ) __A : Any = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3 assert np.abs((np.abs(__lowercase , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1 def __UpperCAmelCase( self ): __A : Dict = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__lowercase ) __A : Optional[int] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) __A : Union[str, Any] = jax.random.PRNGKey(0 ) __A : List[Any] = 
50 __A : Tuple = jax.device_count() __A : str = num_samples * [prompt] __A : str = pipeline.prepare_inputs(__lowercase ) # shard inputs and rng __A : int = replicate(__lowercase ) __A : List[str] = jax.random.split(__lowercase , __lowercase ) __A : List[Any] = shard(__lowercase ) __A : List[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3 assert np.abs((np.abs(__lowercase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1 def __UpperCAmelCase( self ): __A : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa ) __A : int = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) __A : Union[str, Any] = jax.random.PRNGKey(0 ) __A : str = 50 __A : Dict = jax.device_count() __A : Optional[int] = num_samples * [prompt] __A : Dict = pipeline.prepare_inputs(__lowercase ) # shard inputs and rng __A : Union[str, Any] = replicate(__lowercase ) __A : List[str] = jax.random.split(__lowercase , __lowercase ) __A : int = shard(__lowercase ) __A : Any = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3 assert np.abs((np.abs(__lowercase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1 def __UpperCAmelCase( self ): __A : List[str] = FlaxDDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=__lowercase , steps_offset=1 , ) __A : Tuple = FlaxStableDiffusionPipeline.from_pretrained( 
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=__lowercase , safety_checker=__lowercase , ) __A : Any = scheduler.create_state() __A : List[str] = scheduler_state __A : Tuple = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) __A : str = jax.random.PRNGKey(0 ) __A : Any = 50 __A : Union[str, Any] = jax.device_count() __A : List[Any] = num_samples * [prompt] __A : List[Any] = pipeline.prepare_inputs(__lowercase ) # shard inputs and rng __A : Dict = replicate(__lowercase ) __A : Optional[Any] = jax.random.split(__lowercase , __lowercase ) __A : Optional[int] = shard(__lowercase ) __A : Tuple = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3 assert np.abs((np.abs(__lowercase , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1 def __UpperCAmelCase( self ): __A : str = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) __A : str = jax.device_count() __A : Union[str, Any] = num_samples * [prompt] __A : Any = jax.random.split(jax.random.PRNGKey(0 ) , __lowercase ) __A : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__lowercase , ) __A : Optional[int] = replicate(__lowercase ) __A : Optional[Any] = pipeline.prepare_inputs(__lowercase ) __A : Any = shard(__lowercase ) __A : List[Any] = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) __A : int = images[2, 0, 256, 10:17, 1] # With memory efficient attention __A : 
List[str] = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__lowercase , use_memory_efficient_attention=__lowercase , ) __A : List[Any] = replicate(__lowercase ) __A : Tuple = pipeline.prepare_inputs(__lowercase ) __A : List[Any] = shard(__lowercase ) __A : List[Any] = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) __A : int = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
520
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin A__ = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class a ( unittest.TestCase , __lowerCamelCase ): def __lowerCamelCase ( self :str ): snake_case__ : Union[str, Any] = load_tool('''text-question-answering''' ) self.tool.setup() snake_case__ : int = load_tool('''text-question-answering''' ,remote=__lowercase ) def __lowerCamelCase ( self :List[Any] ): snake_case__ : int = self.tool(__lowercase ,'''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowercase ,'''launched the BigScience Research Workshop''' ) def __lowerCamelCase ( self :Optional[int] ): snake_case__ : int = self.remote_tool(__lowercase ,'''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowercase ,'''launched the BigScience Research Workshop''' ) def __lowerCamelCase ( self :Union[str, Any] ): snake_case__ : int = self.tool(text=__lowercase ,question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowercase ,'''launched the BigScience Research Workshop''' ) def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : Optional[Any] = self.remote_tool(text=__lowercase ,question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowercase ,'''launched the BigScience Research Workshop''' )
252
0
import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class __snake_case ( _A ): __lowerCamelCase = """M-CLIP""" def __init__( self , __UpperCamelCase=1024 , __UpperCamelCase=768 , **__UpperCamelCase ) -> Optional[int]: '''simple docstring''' snake_case__ : List[str] = transformerDimSize snake_case__ : Dict = imageDimSize super().__init__(**__lowerCamelCase ) class __snake_case ( _A ): __lowerCamelCase = MCLIPConfig def __init__( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' super().__init__(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) snake_case__ : Dict = XLMRobertaModel(__lowerCamelCase ) snake_case__ : List[str] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def __a ( self , __UpperCamelCase , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : Any = self.transformer(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )[0] snake_case__ : Any = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(__lowerCamelCase ), embs
717
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowerCAmelCase__ : List[str] = HfApi() lowerCAmelCase__ : str = {} # fmt: off lowerCAmelCase__ : int = torch.tensor([ -0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65 ]) lowerCAmelCase__ : Dict = torch.tensor([ -0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05 ]) lowerCAmelCase__ : Union[str, Any] = torch.tensor([ 0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86 ]) lowerCAmelCase__ : List[Any] = torch.tensor([ 0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, 
-0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31 ]) lowerCAmelCase__ : Optional[Any] = torch.tensor([ 0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90 ]) lowerCAmelCase__ : List[str] = torch.tensor([ 0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51]) lowerCAmelCase__ : List[Any] = torch.tensor([ -1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66 ]) lowerCAmelCase__ : Tuple = torch.tensor([ -1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55 ]) lowerCAmelCase__ : List[str] = torch.tensor([ -2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 
2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43 ]) lowerCAmelCase__ : Dict = torch.tensor([ -2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43 ]) lowerCAmelCase__ : Any = torch.tensor([ -1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19 ]) # fmt: on lowerCAmelCase__ : Any = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowerCAmelCase__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith('''CompVis'''): lowerCAmelCase__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: lowerCAmelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowerCAmelCase__ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowerCAmelCase__ : List[str] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowerCAmelCase__ : int = model(noise, time_step).sample assert torch.allclose( 
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
699
0
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _lowerCAmelCase ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] ='' SCREAMING_SNAKE_CASE_ : str =( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) SCREAMING_SNAKE_CASE_ : str =None # compression type in fsspec. ex: "gzip" SCREAMING_SNAKE_CASE_ : str =None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict = "" , SCREAMING_SNAKE_CASE__ : Union[str, Any] = None , SCREAMING_SNAKE_CASE__ : List[Any] = None , **SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" super().__init__(self , **SCREAMING_SNAKE_CASE__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode UpperCamelCase = fsspec.open( SCREAMING_SNAKE_CASE__ , mode='rb' , protocol=SCREAMING_SNAKE_CASE__ , compression=self.compression , client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) UpperCamelCase = os.path.basename(self.file.path.split('::' )[0] ) UpperCamelCase = ( self.compressed_name[: self.compressed_name.rindex('.' 
)] if '''.''' in self.compressed_name else self.compressed_name ) UpperCamelCase = None @classmethod def __lowerCAmelCase ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" return super()._strip_protocol(SCREAMING_SNAKE_CASE__ ).lstrip('/' ) def __lowerCAmelCase ( self : str ): """simple docstring""" if self.dir_cache is None: UpperCamelCase = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name} UpperCamelCase = {f['''name''']: f} def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" return self.file.open().read() def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple = "rb" , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : int , ): """simple docstring""" UpperCamelCase = self._strip_protocol(SCREAMING_SNAKE_CASE__ ) if mode != "rb": raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' ) return self.file.open() class _lowerCAmelCase ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any ='bz2' SCREAMING_SNAKE_CASE_ : int ='bz2' SCREAMING_SNAKE_CASE_ : Union[str, Any] ='.bz2' class _lowerCAmelCase ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict ='gzip' SCREAMING_SNAKE_CASE_ : Optional[Any] ='gzip' SCREAMING_SNAKE_CASE_ : int ='.gz' class _lowerCAmelCase ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str ='lz4' SCREAMING_SNAKE_CASE_ : int ='lz4' SCREAMING_SNAKE_CASE_ : Optional[int] ='.lz4' class _lowerCAmelCase ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] ='xz' SCREAMING_SNAKE_CASE_ : List[str] ='xz' SCREAMING_SNAKE_CASE_ : Optional[int] ='.xz' class _lowerCAmelCase ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int ='zstd' SCREAMING_SNAKE_CASE_ : List[Any] ='zstd' SCREAMING_SNAKE_CASE_ : Union[str, Any] 
='.zst' def __init__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] = "rb" , SCREAMING_SNAKE_CASE__ : Optional[Any] = None , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : Optional[Any] = DEFAULT_BLOCK_SIZE , **SCREAMING_SNAKE_CASE__ : Any , ): """simple docstring""" super().__init__( fo=SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ , target_protocol=SCREAMING_SNAKE_CASE__ , target_options=SCREAMING_SNAKE_CASE__ , block_size=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 UpperCamelCase = self.file.__enter__ class _lowerCAmelCase : """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" UpperCamelCase = file_ def __enter__( self : Optional[int] ): """simple docstring""" self._file.__enter__() return self def __exit__( self : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" self._file.__exit__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __iter__( self : Tuple ): """simple docstring""" return iter(self._file ) def __lowerCAmelCase ( self : List[Any] ): """simple docstring""" return next(self._file ) def __getattr__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" return getattr(self._file , SCREAMING_SNAKE_CASE__ ) def fixed_enter(*SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ): return WrappedFile(_enter(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) ) UpperCamelCase = fixed_enter
282
from math import factorial def __lowercase ( _UpperCamelCase = 100 ) ->int: """simple docstring""" return sum(map(_UpperCamelCase, str(factorial(_UpperCamelCase ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
319
0
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __lowercase (_UpperCAmelCase ): def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A_ , '''width_multiplier''' ) ) class __lowercase : def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_="swish" , A_=3 , A_=32 , A_=0.1 , A_=0.02 , A_=True , A_=True , A_=10 , A_=None , A_=0.25 , A_=0.0 , A_=0.0 , ) ->str: '''simple docstring''' __lowerCAmelCase : Dict = parent __lowerCAmelCase : Optional[Any] = batch_size __lowerCAmelCase : List[str] = image_size __lowerCAmelCase : Union[str, Any] = patch_size __lowerCAmelCase : Tuple = num_channels __lowerCAmelCase : Optional[int] = make_divisible(512 * width_multiplier , divisor=8 ) __lowerCAmelCase : Union[str, Any] = hidden_act __lowerCAmelCase : Tuple = conv_kernel_size __lowerCAmelCase : Any = output_stride __lowerCAmelCase : List[Any] = classifier_dropout_prob __lowerCAmelCase : Dict = use_labels __lowerCAmelCase : List[Any] = is_training __lowerCAmelCase : Union[str, Any] = num_labels __lowerCAmelCase : Any = initializer_range __lowerCAmelCase : int = scope __lowerCAmelCase : 
Union[str, Any] = width_multiplier __lowerCAmelCase : int = ffn_dropout __lowerCAmelCase : Optional[Any] = attn_dropout def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : List[str] = None if self.use_labels: __lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_labels ) __lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __lowerCAmelCase : Any = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = MobileViTVaModel(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : int = model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.num_labels __lowerCAmelCase : str = MobileViTVaForImageClassification(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Tuple = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ ) 
->List[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = self.num_labels __lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Optional[int] = model(A_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __lowerCAmelCase : Optional[int] = model(A_ , labels=A_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Union[str, Any] = config_and_inputs __lowerCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) _UpperCamelCase = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Any = MobileViTVaModelTester(self ) __lowerCAmelCase : Optional[Any] = MobileViTVaConfigTester(self , config_class=A_ , has_text_modality=A_ ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' pass 
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' ) def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='''MobileViTV2 does not output attentions''' ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' ) def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' pass def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : int = model_class(A_ ) __lowerCAmelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase : Dict = [*signature.parameters.keys()] __lowerCAmelCase : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A_ ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' def check_hidden_states_output(A_ , A_ , A_ ): __lowerCAmelCase : Any = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): __lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(A_ , A_ ) ) __lowerCAmelCase : Optional[Any] = outputs.hidden_states __lowerCAmelCase : int = 5 self.assertEqual(len(A_ ) , A_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
__lowerCAmelCase : Union[str, Any] = 2 for i in range(len(A_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __lowerCAmelCase, __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Dict = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCAmelCase : Union[str, Any] = True check_hidden_states_output(A_ , A_ , A_ ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A_ ) @slow def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Any = MobileViTVaModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _lowercase ( ): __lowerCAmelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowercase (unittest.TestCase ): @cached_property def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Optional[int] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to( A_ ) __lowerCAmelCase : List[str] = 
self.default_image_processor __lowerCAmelCase : Tuple = prepare_img() __lowerCAmelCase : Any = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ ) # forward pass with torch.no_grad(): __lowerCAmelCase : Dict = model(**A_ ) # verify the logits __lowerCAmelCase : Any = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) __lowerCAmelCase : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) ) @slow def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) __lowerCAmelCase : Any = model.to(A_ ) __lowerCAmelCase : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) __lowerCAmelCase : Dict = prepare_img() __lowerCAmelCase : Tuple = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ ) # forward pass with torch.no_grad(): __lowerCAmelCase : Tuple = model(**A_ ) __lowerCAmelCase : Dict = outputs.logits # verify the logits __lowerCAmelCase : str = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , A_ ) __lowerCAmelCase : Tuple = torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=A_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) ) @slow def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) __lowerCAmelCase : str = model.to(A_ ) __lowerCAmelCase : Dict = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) 
__lowerCAmelCase : str = prepare_img() __lowerCAmelCase : Optional[Any] = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ ) # forward pass with torch.no_grad(): __lowerCAmelCase : Tuple = model(**A_ ) __lowerCAmelCase : Optional[int] = outputs.logits.detach().cpu() __lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(50, 60)] ) __lowerCAmelCase : List[str] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , A_ ) __lowerCAmelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=A_ ) __lowerCAmelCase : List[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , A_ )
583
def _lowercase ( lowercase__ , lowercase__ ): __lowerCAmelCase : Union[str, Any] = len(lowercase__ ) __lowerCAmelCase : Any = len(lowercase__ ) __lowerCAmelCase : str = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] __lowerCAmelCase : Optional[Any] = True for i in range(lowercase__ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: __lowerCAmelCase : Union[str, Any] = True if a[i].islower(): __lowerCAmelCase : Optional[Any] = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
583
1
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Tuple: # Initialise PyTorch model __UpperCAmelCase =TaConfig.from_json_file(snake_case__ ) print(f"""Building PyTorch model from configuration: {config}""" ) __UpperCAmelCase =TaForConditionalGeneration(snake_case__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCamelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
132
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: UpperCamelCase_ = None UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = '▁' UpperCamelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase_ = { 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}, 'tokenizer_file': { 'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json' }, } UpperCamelCase_ = { 'google/pegasus-xsum': 5_1_2, } class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ): a_ : int = VOCAB_FILES_NAMES a_ : str = PRETRAINED_VOCAB_FILES_MAP a_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : Optional[Any] = PegasusTokenizer a_ : str = ['''input_ids''', '''attention_mask'''] def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<pad>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<mask_2>" , UpperCAmelCase="<mask_1>" , UpperCAmelCase=None , UpperCAmelCase=1_0_3 , **UpperCAmelCase , ): '''simple docstring''' __UpperCAmelCase =offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase , UpperCAmelCase): raise TypeError( f"""additional_special_tokens should be of type {type(UpperCAmelCase)}, but is""" f""" {type(UpperCAmelCase)}""") __UpperCAmelCase =( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(UpperCAmelCase) , self.offset - 1) ] if len(set(UpperCAmelCase)) != 
len(UpperCAmelCase): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""") __UpperCAmelCase =additional_special_tokens_extended else: __UpperCAmelCase =[mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset)] super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , pad_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , mask_token=UpperCAmelCase , mask_token_sent=UpperCAmelCase , offset=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , ) __UpperCAmelCase =vocab_file __UpperCAmelCase =False if not self.vocab_file else True def A__ (self , UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' f""" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}""") return [1 if x in all_special_ids else 0 for x in seq] def A__ (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False): '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def A__ (self , UpperCAmelCase , UpperCAmelCase=None): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def A__ (self , 
UpperCAmelCase , UpperCAmelCase = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(UpperCAmelCase): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return __UpperCAmelCase =os.path.join( UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase): copyfile(self.vocab_file , UpperCAmelCase) return (out_vocab_file,)
132
1
"""simple docstring""" from __future__ import annotations _lowercase = tuple[int, int, int] _lowercase = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase _lowercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # -------------------------- default selection -------------------------- # rotors -------------------------- _lowercase = "EGZWVONAHDCLFQMSIPJBYUKXTR" _lowercase = "FOBHMDKEXQNRAULPGSJVTYICZW" _lowercase = "ZJXESIUQLHAVRMDOYGTNFWPBKC" # reflector -------------------------- _lowercase = { "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M", } # -------------------------- extra rotors -------------------------- _lowercase = "RMDJXFUWGISLHVTCQNKYPBEZOA" _lowercase = "SGLCPQWZHKXAREONTFBVIYJUDM" _lowercase = "HVSICLTYKQUBXDWAJZOMFGPREN" _lowercase = "RZWQHFMVDBKICJLNTUXAGYPSOE" _lowercase = "LFKIJODBEGAMQPXVUHYSTCZRWN" _lowercase = "KOAEGVDHXPQZMLFTYWJNBRCIUS" def snake_case__ ( __lowerCamelCase : RotorPositionT , __lowerCamelCase : RotorSelectionT , __lowerCamelCase : str ): """simple docstring""" # Checks if there are 3 unique rotors if (unique_rotsel := len(set(__lowerCamelCase ) )) < 3: lowerCamelCase__ : List[Any] =f'''Please use 3 unique rotors (not {unique_rotsel})''' raise Exception(__lowerCamelCase ) # Checks if rotor positions are valid lowerCamelCase__ : Any =rotpos if not 0 < rotorposa <= len(__lowerCamelCase ): lowerCamelCase__ : str =f'''First rotor position is not within range of 1..26 ({rotorposa}''' raise ValueError(__lowerCamelCase ) if not 0 < rotorposa <= len(__lowerCamelCase ): lowerCamelCase__ : List[Any] =f'''Second rotor position is not within range of 1..26 ({rotorposa})''' raise ValueError(__lowerCamelCase ) if not 0 < rotorposa <= len(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] =f'''Third 
rotor position is not within range of 1..26 ({rotorposa})''' raise ValueError(__lowerCamelCase ) # Validates string and returns dict lowerCamelCase__ : Dict =_plugboard(__lowerCamelCase ) return rotpos, rotsel, pbdict def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" # tests the input string if it # a) is type string # b) has even length (so pairs can be made) if not isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : str =f'''Plugboard setting isn\'t type string ({type(__lowerCamelCase )})''' raise TypeError(__lowerCamelCase ) elif len(__lowerCamelCase ) % 2 != 0: lowerCamelCase__ : Any =f'''Odd number of symbols ({len(__lowerCamelCase )})''' raise Exception(__lowerCamelCase ) elif pbstring == "": return {} pbstring.replace(''' ''' , '''''' ) # Checks if all characters are unique lowerCamelCase__ : str =set() for i in pbstring: if i not in abc: lowerCamelCase__ : Optional[int] =f'''\'{i}\' not in list of symbols''' raise Exception(__lowerCamelCase ) elif i in tmppbl: lowerCamelCase__ : Optional[int] =f'''Duplicate symbol ({i})''' raise Exception(__lowerCamelCase ) else: tmppbl.add(__lowerCamelCase ) del tmppbl # Created the dictionary lowerCamelCase__ : Dict ={} for j in range(0 , len(__lowerCamelCase ) - 1 , 2 ): lowerCamelCase__ : List[str] =pbstring[j + 1] lowerCamelCase__ : Dict =pbstring[j] return pb def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : RotorPositionT , __lowerCamelCase : RotorSelectionT = (rotora, rotora, rotora) , __lowerCamelCase : str = "" , ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =text.upper() lowerCamelCase__ : Union[str, Any] =_validator( __lowerCamelCase , __lowerCamelCase , plugb.upper() ) lowerCamelCase__ : Any =rotor_position lowerCamelCase__ : Dict =rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 lowerCamelCase__ : List[str] =[] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard 
-------------------------- if symbol in plugboard: lowerCamelCase__ : Tuple =plugboard[symbol] # rotor ra -------------------------- lowerCamelCase__ : Union[str, Any] =abc.index(__lowerCamelCase ) + rotorposa lowerCamelCase__ : Tuple =rotora[index % len(__lowerCamelCase )] # rotor rb -------------------------- lowerCamelCase__ : List[Any] =abc.index(__lowerCamelCase ) + rotorposa lowerCamelCase__ : Tuple =rotora[index % len(__lowerCamelCase )] # rotor rc -------------------------- lowerCamelCase__ : List[str] =abc.index(__lowerCamelCase ) + rotorposa lowerCamelCase__ : Optional[int] =rotora[index % len(__lowerCamelCase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher lowerCamelCase__ : List[Any] =reflector[symbol] # 2nd rotors lowerCamelCase__ : List[Any] =abc[rotora.index(__lowerCamelCase ) - rotorposa] lowerCamelCase__ : List[str] =abc[rotora.index(__lowerCamelCase ) - rotorposa] lowerCamelCase__ : Union[str, Any] =abc[rotora.index(__lowerCamelCase ) - rotorposa] # 2nd plugboard if symbol in plugboard: lowerCamelCase__ : int =plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): lowerCamelCase__ : str =0 rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] =0 rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): lowerCamelCase__ : str =0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) if __name__ == "__main__": _lowercase = "This is my Python script that emulates the Enigma machine from WWII." _lowercase = (1, 1, 1) _lowercase = "pictures" _lowercase = (rotora, rotora, rotora) _lowercase = enigma(message, rotor_pos, rotor_sel, pb) print("Encrypted message:", en) print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
700
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" if isinstance(__lowerCamelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class __SCREAMING_SNAKE_CASE : '''simple docstring''' def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]: pass def snake_case ( self : List[str] )-> List[str]: pass def snake_case ( self : Optional[Any] )-> str: pass def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict: lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max() self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int: 
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, 
attention_mask=lowerCamelCase ) lowerCamelCase__ : int =output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) lowerCamelCase__ : List[str] =after_output[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-3 ) def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[str] =model( input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase ) lowerCamelCase__ : int =output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size ) lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size ) lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCamelCase__ : int =num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCamelCase__ : List[Any] =output.text_model_output.attentions self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:], 
(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any: pt_model.to(lowerCamelCase ) pt_model.eval() # prepare inputs lowerCamelCase__ : Any =inputs_dict lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple() lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase ) lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase ) pt_model_loaded.to(lowerCamelCase ) pt_model_loaded.eval() with torch.no_grad(): lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCamelCase, 
pt_output_loaded.numpy(), 4E-2 ) def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]: lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase ) lowerCamelCase__ : Tuple =fx_state self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]: lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params ) self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : Any =self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : int =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase ) def snake_case ( self : Tuple )-> Any: lowerCamelCase__ : Tuple =self.prepare_config_and_inputs() self.check_save_load(**lowerCamelCase ) def snake_case ( self : str )-> Any: lowerCamelCase__ : str =self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCamelCase ) @is_pt_flax_cross_test def snake_case ( self : Tuple )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs() 
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' ) lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' ) lowerCamelCase__ : Tuple =config_inputs_dict self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase ) self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase ) @slow def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs() lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[str] =outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCamelCase ) lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[Any] =after_outputs[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-5 ) @require_flax class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : List[str] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int 
)-> int: lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase ) lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : int )-> Optional[int]: lowerCamelCase__ : Any =FlaxViTModelTester(self ) lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[int]: lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : Optional[Any] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : str =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict: lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase ) lowerCamelCase__ : 
Optional[Any] =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self ) lowerCamelCase__ : List[Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase__ : Dict =processor( text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' ) lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
625
0
"""Deprecated module kept only for backward compatibility.

Importing it emits a ``FutureWarning`` pointing callers at the new home of
``find_executable_batch_size``.
"""
import warnings

warnings.warn(
    # Fix: the first mention read `find_executable_batchsize` (missing
    # underscore), contradicting the import statement shown right after it.
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main "
    "`__init__`: `from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
597
"""Fast (Rust-backed) tokenizer for ConvBERT.

Reconstructed from mangled source: the original reused one identifier for
every module constant (each assignment clobbered the previous), gave every
``__init__`` parameter the same name (a SyntaxError), named all three public
methods identically (later defs silently replaced earlier ones), and
inherited from an undefined base.  The canonical names required by the
``PreTrainedTokenizerFast`` API are restored here.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer (WordPiece, BERT-style special tokens)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the serialized backend normalizer when the kwargs passed
        # here disagree with the state saved inside ``tokenizer_file``.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0s for ``[CLS] A [SEP]``, 1s for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
601
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase_ = { '''configuration_speecht5''': [ '''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''', '''SpeechT5Config''', '''SpeechT5HifiGanConfig''', ], '''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''], '''processing_speecht5''': ['''SpeechT5Processor'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''SpeechT5Tokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SpeechT5ForSpeechToText''', '''SpeechT5ForSpeechToSpeech''', '''SpeechT5ForTextToSpeech''', '''SpeechT5Model''', '''SpeechT5PreTrainedModel''', '''SpeechT5HifiGan''', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
494
"""simple docstring""" import sys lowerCAmelCase_ = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" _UpperCAmelCase = 1 for digit in s: product *= int(SCREAMING_SNAKE_CASE ) return product def __lowerCamelCase ( SCREAMING_SNAKE_CASE = N ) -> int: """simple docstring""" _UpperCAmelCase = -sys.maxsize - 1 _UpperCAmelCase = n[:13] _UpperCAmelCase = 13 while cur_index < len(SCREAMING_SNAKE_CASE ) - 13: if int(n[cur_index] ) >= int(substr[0] ): _UpperCAmelCase = substr[1:] + n[cur_index] cur_index += 1 else: _UpperCAmelCase = max(SCREAMING_SNAKE_CASE,str_eval(SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(F'''{solution() = }''')
494
1
"""Lazy import structure for the TrOCR model.

Repaired: the mangled source assigned the import-structure dict, the
torch-only export list and the final module object to one reused variable
(clobbering the dict), and the closing ``_LazyModule`` call referenced an
undefined ``_import_structure``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

# Modeling code is exported only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
38
"""Project Euler problem 10: summation of primes below two million.

Repaired from mangled source: the body referenced an undefined ``n`` (the
parameter had a mangled name), the inner sieve loop stepped by the *limit*
instead of the current prime (so almost nothing was marked composite), and
the ``__main__`` guard called an undefined ``solution``.
"""


def solution(n: int = 2_000_000) -> int:
    """Return the sum of every prime strictly below ``n``.

    Uses a sieve of Eratosthenes over ``[0, n]``.
    """
    if n < 2:
        # No primes below 2; also prevents indexing past a length-1 sieve.
        return 0
    # 0 = prime candidate, 1 = known composite.
    is_composite = [0] * (n + 1)
    is_composite[0] = is_composite[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if is_composite[i] == 0:
            # Step by i (the mangled original stepped by n).
            for j in range(i * i, n + 1, i):
                is_composite[j] = 1
    # range(n), not n + 1: primes *below* n, matching the problem statement.
    return sum(i for i in range(n) if is_composite[i] == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
407
0
"""Processor pairing an Encodec feature extractor with a T5 tokenizer.

Reconstructed from mangled source: ``__init__`` and ``_decode_audio`` had
duplicate parameter names (a SyntaxError), all four named methods shared one
mangled name (later defs replaced earlier ones), ``_decode_audio``
referenced an undefined ``seq_len``, and the base class was undefined.
"""
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    """Wraps an ``EncodecFeatureExtractor`` and a T5 tokenizer behind one API."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward decoder prompt-id lookup to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Prepare text and/or audio inputs for the model.

        Accepts ``audio=...``, ``sampling_rate=...`` and ``text=...`` keyword
        arguments (or the audio as the first positional argument) and merges
        tokenizer and feature-extractor outputs into a single dict.
        """
        # For backward compatibility with the target-context-manager protocol.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode either generated audio (``audio=...``) or token ids."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        """Strip padding from generated audio using ``padding_mask``."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # Match the padding mask's length to the generated audio by padding it
        # with the **non-padding** token, so generated samples beyond the mask
        # are kept rather than treated as padding.
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
710
"""Configuration class for the Autoformer time-series model.

Reconstructed from mangled source: every ``__init__`` parameter shared the
name ``snake_case`` (a SyntaxError), the logger and the pretrained-config
map shared one variable, the class attributes were clobbered, and the
``_number_of_features`` property had a mangled name even though ``__init__``
reads ``self._number_of_features``.
"""
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    """Stores the configuration of an Autoformer model (architecture sizes,
    time-series feature layout and decomposition hyper-parameters)."""

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # Time-series-specific configuration.
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the (cardinality + 1), capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration.
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer-specific decomposition parameters.
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra per-step input features fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
565
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase__ ={ "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["VisionEncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["TFVisionEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["FlaxVisionEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
482
"""Configuration class for the MVP model.

Reconstructed from mangled source: every ``__init__`` parameter shared the
name ``snake_case`` (a SyntaxError), the logger and the pretrained-config
map shared one module variable, the three class attributes were clobbered,
and the base class name was undefined.
"""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Stores the configuration of an MVP encoder-decoder model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Prompt-tuning specific options.
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy BART-style flag: translate it into forced_bos_token_id once.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
181
0
"""simple docstring""" from __future__ import annotations def A ( snake_case__ ): '''simple docstring''' if len(snake_case__ ) < 2: raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" ) if any(i <= 0 for i in nums ): raise ValueError("""All values must be greater than 0""" ) SCREAMING_SNAKE_CASE__ = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
616
"""simple docstring""" def A ( snake_case__ ): '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("""List is empty""" ) SCREAMING_SNAKE_CASE__ = sum(snake_case__ ) / len(snake_case__ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
616
1
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): __UpperCamelCase =StableDiffusionDiffEditPipeline __UpperCamelCase =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} __UpperCamelCase =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} __UpperCamelCase =frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __UpperCamelCase =frozenset([] ) def UpperCamelCase ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=snake_case__ , ) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) SCREAMING_SNAKE_CASE = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , 
clip_sample=snake_case__ , set_alpha_to_zero=snake_case__ , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) SCREAMING_SNAKE_CASE = CLIPTextModel(snake_case__ ) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Any=0 ): """simple docstring""" SCREAMING_SNAKE_CASE = floats_tensor((1, 1_6, 1_6) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) if str(snake_case__ ).startswith('mps' ): SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) SCREAMING_SNAKE_CASE = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase ( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=0 ): """simple docstring""" SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case__ ) 
).to(snake_case__ ) SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ) if str(snake_case__ ).startswith('mps' ): SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) SCREAMING_SNAKE_CASE = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase ( self : Tuple , snake_case__ : Optional[int] , snake_case__ : int=0 ): """simple docstring""" SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ) if str(snake_case__ ).startswith('mps' ): SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) SCREAMING_SNAKE_CASE = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase ( self : Optional[int] ): """simple docstring""" if not hasattr(self.pipeline_class , '_optional_components' ): return SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(snake_case__ , snake_case__ , snake_case__ ) pipe.register_modules(**{optional_component: None for 
optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ ) SCREAMING_SNAKE_CASE = pipe(**snake_case__ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(snake_case__ ) SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(snake_case__ ) pipe_loaded.to(snake_case__ ) pipe_loaded.set_progress_bar_config(disable=snake_case__ ) for optional_component in pipe._optional_components: self.assertTrue( getattr(snake_case__ , snake_case__ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ ) SCREAMING_SNAKE_CASE = pipe_loaded(**snake_case__ )[0] SCREAMING_SNAKE_CASE = np.abs(output - output_loaded ).max() self.assertLess(snake_case__ , 1E-4 ) def UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE = 'cpu' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(snake_case__ ) SCREAMING_SNAKE_CASE = pipe.generate_mask(**snake_case__ ) SCREAMING_SNAKE_CASE = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 1_6, 1_6) ) SCREAMING_SNAKE_CASE = np.array([0] * 9 ) SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case__ , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE = 'cpu' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(snake_case__ ) SCREAMING_SNAKE_CASE = pipe.invert(**snake_case__ ).images SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 3_2, 3_2, 
3) ) SCREAMING_SNAKE_CASE = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case__ , 1E-3 ) def UpperCamelCase ( self : Dict ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE = 'cpu' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = {'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**snake_case__ ) SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**snake_case__ ) SCREAMING_SNAKE_CASE = self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(snake_case__ ) SCREAMING_SNAKE_CASE = pipe.invert(**snake_case__ ).images SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 3_2, 3_2, 3) ) SCREAMING_SNAKE_CASE = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case__ , 1E-3 ) @require_torch_gpu @slow class UpperCamelCase ( unittest.TestCase ): def UpperCamelCase ( self : str ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase ( cls : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE = raw_image.convert('RGB' ).resize((7_6_8, 7_6_8) ) SCREAMING_SNAKE_CASE = raw_image def UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = 
StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=snake_case__ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=snake_case__ ) SCREAMING_SNAKE_CASE = 'a bowl of fruit' SCREAMING_SNAKE_CASE = 'a bowl of pears' SCREAMING_SNAKE_CASE = pipe.generate_mask( image=self.raw_image , source_prompt=snake_case__ , target_prompt=snake_case__ , generator=snake_case__ , ) SCREAMING_SNAKE_CASE = pipe.invert( prompt=snake_case__ , image=self.raw_image , inpaint_strength=0.7 , generator=snake_case__ ).latents SCREAMING_SNAKE_CASE = pipe( prompt=snake_case__ , mask_image=snake_case__ , image_latents=snake_case__ , generator=snake_case__ , negative_prompt=snake_case__ , inpaint_strength=0.7 , output_type='numpy' , ).images[0] SCREAMING_SNAKE_CASE = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) ) / 2_5_5 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=snake_case__ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=snake_case__ ) SCREAMING_SNAKE_CASE = 'a bowl of fruit' SCREAMING_SNAKE_CASE = 'a bowl of pears' SCREAMING_SNAKE_CASE = pipe.generate_mask( image=self.raw_image , source_prompt=snake_case__ , target_prompt=snake_case__ , generator=snake_case__ , ) 
SCREAMING_SNAKE_CASE = pipe.invert( prompt=snake_case__ , image=self.raw_image , inpaint_strength=0.7 , generator=snake_case__ , num_inference_steps=2_5 , ).latents SCREAMING_SNAKE_CASE = pipe( prompt=snake_case__ , mask_image=snake_case__ , image_latents=snake_case__ , generator=snake_case__ , negative_prompt=snake_case__ , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type='numpy' , ).images[0] SCREAMING_SNAKE_CASE = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) ) / 2_5_5 ) assert np.abs((expected_image - image).max() ) < 5E-1
439
from __future__ import annotations from math import gcd def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 1 , _UpperCamelCase : int = 3 , ) -> int | None: '''simple docstring''' if num < 2: raise ValueError('The input value cannot be less than 2' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> int: return (pow(_UpperCamelCase , 2 ) + step) % modulus for _ in range(_UpperCamelCase ): # These track the position within the cycle detection logic. SCREAMING_SNAKE_CASE = seed SCREAMING_SNAKE_CASE = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. 
SCREAMING_SNAKE_CASE = rand_fn(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) SCREAMING_SNAKE_CASE = rand_fn(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) SCREAMING_SNAKE_CASE = rand_fn(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. SCREAMING_SNAKE_CASE = gcd(hare - tortoise , _UpperCamelCase ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. SCREAMING_SNAKE_CASE = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse a_ : int = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) a_ : Tuple = parser.parse_args() a_ : int = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"""{args.num} is probably prime""") else: a_ : Union[str, Any] = args.num // divisor print(F"""{args.num} = {divisor} * {quotient}""")
439
1
"""simple docstring""" import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py lowerCamelCase = """.""" if __name__ == "__main__": lowerCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""") lowerCamelCase = [] lowerCamelCase = [] with open(doctest_file_path) as fp: for line in fp: lowerCamelCase = line.strip() lowerCamelCase = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: lowerCamelCase = """\n""".join(non_existent_paths) raise ValueError(F"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}") if all_paths != sorted(all_paths): raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
711
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase = logging.get_logger(__name__) class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = ['''pixel_values'''] def __init__( self : str , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Optional[int] , ) -> None: '''simple docstring''' super().__init__(**_UpperCAmelCase ) UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384} UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size # Default value set here for backwards compatibility where the value in config is None UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256 UpperCAmelCase_ = resample UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : 
np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : float , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ) -> np.ndarray: '''simple docstring''' UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" ) UpperCAmelCase_ = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct UpperCAmelCase_ = int(shortest_edge / crop_pct ) UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCAmelCase_ = resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowercase__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ) -> Any: '''simple docstring''' return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowercase__ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray: '''simple docstring''' return 
normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image: '''simple docstring''' UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." 
) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , crop_pct=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] UpperCAmelCase_ = {"pixel_values": images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
14
0
import re def UpperCAmelCase__ ( __magic_name__ : str ): '''simple docstring''' return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , str_ )] def UpperCAmelCase__ ( __magic_name__ : str ): '''simple docstring''' lowerCAmelCase : Any = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def UpperCAmelCase__ ( __magic_name__ : str , __magic_name__ : bool , __magic_name__ : str ): '''simple docstring''' try: lowerCAmelCase : Tuple = split_input(__magic_name__ ) if upper: lowerCAmelCase : List[str] = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: lowerCAmelCase : Optional[Any] = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def UpperCAmelCase__ ( __magic_name__ : str ): '''simple docstring''' return to_simple_case(__magic_name__ ) def UpperCAmelCase__ ( __magic_name__ : str ): '''simple docstring''' try: lowerCAmelCase : List[Any] = to_simple_case(__magic_name__ ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def UpperCAmelCase__ ( __magic_name__ : str , __magic_name__ : bool ): '''simple docstring''' return to_complex_case(__magic_name__ , __magic_name__ , '''_''' ) def UpperCAmelCase__ ( __magic_name__ : str , __magic_name__ : bool ): '''simple docstring''' return to_complex_case(__magic_name__ , __magic_name__ , '''-''' ) if __name__ == "__main__": __import__('doctest').testmod()
348
from math import log from scipy.constants import Boltzmann, physical_constants __SCREAMING_SNAKE_CASE : int = 3_00 # TEMPERATURE (unit = K) def UpperCAmelCase__ ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ): '''simple docstring''' if donor_conc <= 0: raise ValueError('''Donor concentration should be positive''' ) elif acceptor_conc <= 0: raise ValueError('''Acceptor concentration should be positive''' ) elif intrinsic_conc <= 0: raise ValueError('''Intrinsic concentration should be positive''' ) elif donor_conc <= intrinsic_conc: raise ValueError( '''Donor concentration should be greater than intrinsic concentration''' ) elif acceptor_conc <= intrinsic_conc: raise ValueError( '''Acceptor concentration should be greater than intrinsic concentration''' ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
348
1
import argparse

import torch


def rename_key(orig_key):
    """Map a YOSO checkpoint parameter name onto the HF Transformers scheme.

    The replacements are order-dependent: e.g. "mha.attn" must be handled
    before the bare "mha" substring, and "ff1"/"ff2" before "ff".
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        # Everything outside the MLM head lives under the "yoso." prefix.
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys of ``orig_state_dict`` in place and add derived entries.

    Pooler / sentence-classification weights are dropped. Adds the tied
    prediction bias and the (offset-by-2) position ids buffer.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(pytorch_model_path, config_file, pytorch_dump_path):
    """Load a raw YOSO checkpoint, convert its keys and save an HF model."""
    # Deferred import: the renaming helpers above can then be used (and
    # tested) without transformers installed.
    from transformers import YosoConfig, YosoForMaskedLM

    orig_state_dict = torch.load(pytorch_model_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
583
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _UpperCamelCase = { "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"], "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXJapaneseForCausalLM", "GPTNeoXJapaneseLayer", "GPTNeoXJapaneseModel", "GPTNeoXJapanesePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
583
1
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : Union[str, Any] ,a__ : Optional[int] ) -> List[Any]: # Initialise PyTorch model __A : Dict = MobileBertConfig.from_json_file(a__ ) print(f"""Building PyTorch model from configuration: {config}""" ) __A : Tuple = MobileBertForPreTraining(a__ ) # Load weights from tf checkpoint __A : Dict = load_tf_weights_in_mobilebert(a__ ,a__ ,a__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() ,a__ ) if __name__ == "__main__": UpperCAmelCase_ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--mobilebert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained MobileBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCAmelCase_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
17
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Replace every ``name.<digits>`` segment with ``name_<digits>``.

    PyTorch addresses list entries as ``layers.0``; Flax uses ``layers_0``.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key tuple to Flax conventions, reshaping if needed.

    Returns the (possibly renamed) key tuple and the (possibly transposed)
    tensor. ``random_flax_state_dict`` is used to disambiguate cases (e.g.
    norm scales vs linear kernels) by checking which key the Flax model has.
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch ``state_dict`` into a nested Flax params dict.

    Raises:
        ValueError: if a converted tensor's shape disagrees with the
            randomly-initialised Flax parameter of the same name.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
37
0
"""TF adaptive softmax (cluster-based) output layer with masked loss."""
import tensorflow as tf

from ...tf_utils import shape_list


class UpperCamelCase__(tf.keras.layers.Layer):
    """Adaptive softmax head: a short head list plus tail clusters.

    With ``n_clusters == 0`` this degenerates to a plain softmax over the
    whole vocabulary. Otherwise tokens are bucketed by frequency cutoffs and
    each tail cluster gets its own (optionally down-projected) projection.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        """Args:
            vocab_size: total vocabulary size.
            d_embed: embedding dimension of the head cluster.
            d_proj: hidden size fed into the softmax projections.
            cutoffs: ascending frequency cutoffs delimiting the clusters.
            div_val: divisor applied to ``d_embed`` per tail cluster.
            keep_order: whether losses must keep the original token order.
        """
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        """Create the cluster, projection and output weights lazily."""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            # Shared embedding dimension for all clusters.
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            # Shrinking embedding dimension per tail cluster.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        """Project ``x`` (optionally through ``proj``) onto output weights."""
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        """Pick the log-probability of each row's target index."""
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        """Return full-vocabulary log-probabilities; record the loss if ``target`` is given."""
        head_logprob = 0
        if self.n_clusters == 0:
            # Plain softmax over the whole vocabulary.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # Select the positions whose target falls into this cluster.
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    # Head: shortlist tokens plus one "cluster" logit per tail.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter the per-cluster NLL back to the full (seq, batch) grid.
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
602
"""Convert TensorFlow MobileNetV1 checkpoints to HF Transformers format."""
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    """Build a ``MobileNetV1Config`` from a name like ``mobilenet_v1_1.0_224``.

    Raises:
        ValueError: for quantized checkpoints, which are not supported.
    """
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Download the standard COCO cats test image used for output checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TF checkpoint, sanity-check its logits, and save (or push) it."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
602
1
"""Data utilities for token-classification (NER-style) fine-tuning.

Defines the example/feature containers, the abstract task interface, and
torch / TF dataset wrappers built on top of it.
"""

import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    # Unique id for the example.
    guid: str
    # The words of the sequence.
    words: List[str]
    # The labels for each word; only required for train/dev examples.
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data.

    Property names match the corresponding model input names.
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    """Abstract task interface: subclasses define how to read examples and labels."""

    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize `examples` word-by-word and build padded, model-ready features.

        Only the first sub-token of each word carries the real label id; the
        remaining sub-tokens get `pad_token_label_id` so they are ignored by the loss.
        """
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info('''*** Example ***''')
                logger.info('''guid: %s''', example.guid)
                logger.info('''tokens: %s''', ''' '''.join([str(x) for x in tokens]))
                logger.info('''input_ids: %s''', ''' '''.join([str(x) for x in input_ids]))
                logger.info('''input_mask: %s''', ''' '''.join([str(x) for x in input_mask]))
                logger.info('''segment_ids: %s''', ''' '''.join([str(x) for x in segment_ids]))
                logger.info('''label_ids: %s''', ''' '''.join([str(x) for x in label_ids]))

            # Models without token_type_ids (e.g. distilbert) must not receive them.
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        """Torch dataset of `InputFeatures`, with on-disk caching of the featurized data."""

        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file.
            cached_features_file = os.path.join(
                data_dir,
                '''cached_{}_{}_{}'''.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '''.lock'''
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f'''Loading features from cached file {cached_features_file}''')
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f'''Creating features from dataset file at {data_dir}''')
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ['''xlnet''']),
                        # xlnet has a cls token at the end
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == '''left'''),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f'''Saving features into cached file {cached_features_file}''')
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        """TF dataset of `InputFeatures` exposed as a `tf.data.Dataset` generator."""

        features: List[InputFeatures]
        # Same role as the torch ignore_index: padding positions are excluded from the loss.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ['''xlnet''']),
                # xlnet has a cls token at the end
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == '''left'''),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64),
                    (
                        {'''input_ids''': tf.TensorShape([None]), '''attention_mask''': tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64),
                    (
                        {
                            '''input_ids''': tf.TensorShape([None]),
                            '''attention_mask''': tf.TensorShape([None]),
                            '''token_type_ids''': tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            # The cardinality of a generator dataset is unknown; assert it so that
            # downstream code (e.g. keras `fit`) can rely on `len`.
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
74
"""Collect selected Python warnings (e.g. DeprecationWarning) from CI job artifacts."""

import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (a ``.zip`` file, or a directory when run from GH).

    A warning body in pytest's "warnings summary" is a run of indented lines; it is
    flushed into `selected_warnings` as soon as a non-indented line is reached, and
    kept only if it mentions one of the `targets` warning names.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # `from_gh` is a module-level flag set in `__main__` below.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            # Best-effort: a corrupt artifact should not abort the whole collection.
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files found in `artifact_dir`."""
    selected_warnings = set()
    paths = [
        os.path.join(artifact_dir, p)
        for p in os.listdir(artifact_dir)
        if (p.endswith(".zip") or from_gh)
    ]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        # argparse `type=` hook: "A,B,C" -> ["A", "B", "C"]
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    parser.add_argument(
        """--output_dir""",
        type=str,
        required=True,
        help="""Where to store the downloaded artifacts and other result files.""",
    )
    parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    # optional parameters
    parser.add_argument(
        """--targets""",
        default="""DeprecationWarning,UserWarning,FutureWarning""",
        type=list_str,
        help="""Comma-separated list of target warning(s) which we want to extract.""",
    )
    parser.add_argument(
        """--from_gh""",
        action="""store_true""",
        help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("""=""" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
74
1
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : '''simple docstring''' def __init__( self :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict=13 , lowerCAmelCase__ :Any=30 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :List[str]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[Any]=10 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Union[str, Any]=0.6 , lowerCAmelCase__ :str=None , ) -> int: __SCREAMING_SNAKE_CASE : Optional[int] = parent __SCREAMING_SNAKE_CASE : List[Any] = batch_size __SCREAMING_SNAKE_CASE : Optional[int] = image_size __SCREAMING_SNAKE_CASE : Optional[Any] = patch_size __SCREAMING_SNAKE_CASE : Tuple = num_channels __SCREAMING_SNAKE_CASE : Tuple = is_training __SCREAMING_SNAKE_CASE : Optional[int] = use_labels __SCREAMING_SNAKE_CASE : List[Any] = hidden_size __SCREAMING_SNAKE_CASE : int = num_hidden_layers __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size 
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act __SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size __SCREAMING_SNAKE_CASE : Dict = initializer_range __SCREAMING_SNAKE_CASE : Any = mask_ratio __SCREAMING_SNAKE_CASE : str = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 __SCREAMING_SNAKE_CASE : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __magic_name__( self :Tuple ) -> str: __SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_labels: __SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : Tuple = self.get_config() return config, pixel_values, labels def __magic_name__( self :Union[str, Any] ) -> List[str]: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> List[Any]: __SCREAMING_SNAKE_CASE : 
Optional[int] = TFViTMAEModel(config=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , training=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__( self :Any , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : Optional[int] = TFViTMAEForPreTraining(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , training=lowerCAmelCase__ ) # expected sequence length = num_patches __SCREAMING_SNAKE_CASE : int = (self.image_size // self.patch_size) ** 2 __SCREAMING_SNAKE_CASE : Union[str, Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images __SCREAMING_SNAKE_CASE : List[str] = 1 __SCREAMING_SNAKE_CASE : Optional[Any] = TFViTMAEForPreTraining(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , training=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def __magic_name__( self :Optional[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : Optional[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _lowercase ( A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''feature-extraction''': TFViTMAEModel} if 
is_tf_available() else {} SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def __magic_name__( self :str ) -> Tuple: __SCREAMING_SNAKE_CASE : Union[str, Any] = TFViTMAEModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 ) def __magic_name__( self :Tuple ) -> Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def __magic_name__( self :Dict ) -> str: pass def __magic_name__( self :List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : int = model_class(lowerCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __SCREAMING_SNAKE_CASE : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Layer ) ) def __magic_name__( self :str ) -> Dict: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE : Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __magic_name__( self :Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __magic_name__( self 
:List[Any] ) -> str: __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__ ) def __magic_name__( self :List[str] ) -> List[Any]: # make the mask reproducible np.random.seed(2 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE : str = int((config.image_size // config.patch_size) ** 2 ) __SCREAMING_SNAKE_CASE : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , noise=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = copy.deepcopy(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : List[Any] = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = outputs_dict[0].numpy() __SCREAMING_SNAKE_CASE : Any = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def __magic_name__( self :List[str] ) -> str: # make the mask reproducible np.random.seed(2 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) __SCREAMING_SNAKE_CASE : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(lowerCAmelCase__ :Any ): __SCREAMING_SNAKE_CASE : Tuple = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : Optional[int] = v.numpy() else: __SCREAMING_SNAKE_CASE : Dict = np.array(lowerCAmelCase__ ) return inputs_np_dict for model_class in 
self.all_model_classes: __SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_numpy_arrays(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , noise=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ ) self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int ) -> Dict: # make masks reproducible np.random.seed(2 ) __SCREAMING_SNAKE_CASE : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) __SCREAMING_SNAKE_CASE : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) __SCREAMING_SNAKE_CASE : int = tf.constant(lowerCAmelCase__ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument __SCREAMING_SNAKE_CASE : Union[str, Any] = tf_noise super().check_pt_tf_models(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> int: # make mask reproducible np.random.seed(2 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE : List[str] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(lowerCAmelCase__ ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(lowerCAmelCase__ , lowerCAmelCase__ ),) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCAmelCase__ , '''_keras_serializable''' , lowerCAmelCase__ ) } __SCREAMING_SNAKE_CASE : Dict = int((config.image_size // config.patch_size) ** 2 ) __SCREAMING_SNAKE_CASE : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) __SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor(lowerCAmelCase__ ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: __SCREAMING_SNAKE_CASE : Optional[int] = main_layer_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } __SCREAMING_SNAKE_CASE : Dict = tf.keras.Model(lowerCAmelCase__ , outputs=main_layer(lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE : Dict = os.path.join(lowerCAmelCase__ , '''keras_model.h5''' ) model.save(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = tf.keras.models.load_model( lowerCAmelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(lowerCAmelCase__ , tf.keras.Model ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ ) self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def __magic_name__( self :Dict ) -> str: # make mask reproducible np.random.seed(2 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) __SCREAMING_SNAKE_CASE : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in 
self.all_model_classes: __SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , noise=lowerCAmelCase__ ) if model_class.__name__ == "TFViTMAEModel": __SCREAMING_SNAKE_CASE : Any = outputs.last_hidden_state.numpy() __SCREAMING_SNAKE_CASE : Dict = 0 else: __SCREAMING_SNAKE_CASE : Tuple = outputs.logits.numpy() __SCREAMING_SNAKE_CASE : List[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ , saved_model=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = model_class.from_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , noise=lowerCAmelCase__ ) if model_class.__name__ == "TFViTMAEModel": __SCREAMING_SNAKE_CASE : str = after_outputs['''last_hidden_state'''].numpy() __SCREAMING_SNAKE_CASE : Tuple = 0 else: __SCREAMING_SNAKE_CASE : Optional[int] = after_outputs['''logits'''].numpy() __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 __SCREAMING_SNAKE_CASE : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1E-5 ) def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]: # make mask reproducible np.random.seed(2 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE : str = int((config.image_size // config.patch_size) ** 2 ) __SCREAMING_SNAKE_CASE : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Optional[int] = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , noise=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = model.get_config() # make sure 
that returned config is jsonifiable, which is required by keras json.dumps(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config __SCREAMING_SNAKE_CASE : Optional[int] = model_class.from_config(model.config ) __SCREAMING_SNAKE_CASE : int = new_model(lowerCAmelCase__ ) # Build model new_model.set_weights(model.get_weights() ) __SCREAMING_SNAKE_CASE : List[Any] = new_model(lowerCAmelCase__ , noise=lowerCAmelCase__ ) self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def __magic_name__( self :List[Any] ) -> List[Any]: pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def __magic_name__( self :List[str] ) -> Tuple: pass @slow def __magic_name__( self :Tuple ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : Dict = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(lowerCAmelCase__ ) def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _lowercase ( unittest.TestCase ): '''simple docstring''' @cached_property def __magic_name__( self :Union[str, Any] ) -> Any: return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def __magic_name__( self :Optional[Any] ) -> Union[str, Any]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) __SCREAMING_SNAKE_CASE : Dict = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) __SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor __SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img() __SCREAMING_SNAKE_CASE : Any = image_processor(images=lowerCAmelCase__ , 
return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) __SCREAMING_SNAKE_CASE : Tuple = ViTMAEConfig() __SCREAMING_SNAKE_CASE : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.uniform(size=(1, num_patches) ) # forward pass __SCREAMING_SNAKE_CASE : Tuple = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ ) # verify the logits __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
260
from heapq import heappop, heappush import numpy as np def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = grid.shape __SCREAMING_SNAKE_CASE : Any = [-1, 1, 0, 0] __SCREAMING_SNAKE_CASE : Any = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = [(0, source)], set() __SCREAMING_SNAKE_CASE : Optional[Any] = np.full((rows, cols) , np.inf ) __SCREAMING_SNAKE_CASE : Tuple = 0 __SCREAMING_SNAKE_CASE : int = np.empty((rows, cols) , dtype=lowercase__ ) __SCREAMING_SNAKE_CASE : List[str] = None while queue: ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : List[str] = heappop(lowercase__ ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: __SCREAMING_SNAKE_CASE : int = [] while (x, y) != source: path.append((x, y) ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = predecessors[x, y] path.append(lowercase__ ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(lowercase__ ) ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: __SCREAMING_SNAKE_CASE : Optional[int] = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(lowercase__ , (dist + 1, (nx, ny)) ) __SCREAMING_SNAKE_CASE : Optional[int] = dist + 1 __SCREAMING_SNAKE_CASE : Optional[Any] = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
260
1
from math import factorial class a : def __init__( self :Dict ,__lowercase :Union[str, Any] ,__lowercase :List[Any] ): snake_case__ : List[Any] = real if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): snake_case__ : Dict = [1] * rank else: snake_case__ : int = rank def __repr__( self :List[Any] ): return ( F"""{self.real}+""" F"""{'+'.join(str(lowerCamelCase__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}""" ) def __lowerCamelCase ( self :Optional[int] ): snake_case__ : Union[str, Any] = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real ,lowerCamelCase__ ) def __add__( self :Dict ,__lowercase :str ): if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ): return Dual(self.real + other ,self.duals ) snake_case__ : Any = self.duals.copy() snake_case__ : int = other.duals.copy() if len(lowerCamelCase__ ) > len(lowerCamelCase__ ): o_dual.extend([1] * (len(lowerCamelCase__ ) - len(lowerCamelCase__ )) ) elif len(lowerCamelCase__ ) < len(lowerCamelCase__ ): s_dual.extend([1] * (len(lowerCamelCase__ ) - len(lowerCamelCase__ )) ) snake_case__ : List[Any] = [] for i in range(len(lowerCamelCase__ ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real ,lowerCamelCase__ ) __lowerCAmelCase : Tuple = __add__ def __sub__( self :str ,__lowercase :int ): return self + other * -1 def __mul__( self :List[str] ,__lowercase :int ): if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ): snake_case__ : List[Any] = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other ,lowerCamelCase__ ) snake_case__ : str = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real ,lowerCamelCase__ ) 
__lowerCAmelCase : List[str] = __mul__ def __truediv__( self :List[Any] ,__lowercase :Tuple ): if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ): snake_case__ : Optional[int] = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other ,lowerCamelCase__ ) raise ValueError def __floordiv__( self :Optional[int] ,__lowercase :List[str] ): if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ): snake_case__ : Tuple = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other ,lowerCamelCase__ ) raise ValueError def __pow__( self :str ,__lowercase :int ): if n < 0 or isinstance(lowerCamelCase__ ,lowerCamelCase__ ): raise ValueError('''power must be a positive integer''' ) if n == 0: return 1 if n == 1: return self snake_case__ : Tuple = self for _ in range(n - 1 ): x *= self return x def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]: """simple docstring""" if not callable(__lowerCAmelCase ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(__lowerCAmelCase , (float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError('''differentiate() requires an int as input for order''' ) snake_case__ : int = Dual(__lowerCAmelCase , 1 ) snake_case__ : str = func(__lowerCAmelCase ) if order == 0: return result.real return result.duals[order - 1] * factorial(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() def _lowerCAmelCase ( __lowerCAmelCase ) -> str: """simple docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
252
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class _UpperCamelCase : __lowerCamelCase = BlenderbotSmallConfig __lowerCamelCase = {} __lowerCamelCase = "gelu" def __init__(self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=2_0 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , ): """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def A (self ): """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ = tf.concat([input_ids, eos_tensor] , axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , 
encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ = prepare_blenderbot_small_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return config, inputs_dict def A (self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A__ = TFBlenderbotSmallModel(config=lowerCamelCase__ ).get_decoder() A__ = inputs_dict["""input_ids"""] A__ = input_ids[:1, :] A__ = inputs_dict["""attention_mask"""][:1, :] A__ = inputs_dict["""head_mask"""] A__ = 1 # first forward pass A__ = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , head_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ ) A__ ,A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens] , axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0] A__ = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCamelCase__ , lowerCamelCase__ , rtol=1E-3 ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase : 
List[str] , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : int=None , UpperCamelCase : Dict=None , ): if attention_mask is None: A__ = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase): __lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False def A (self ): """simple docstring""" A__ = TFBlenderbotSmallModelTester(self ) A__ = ConfigTester(self , config_class=lowerCamelCase__ ) def A (self ): 
"""simple docstring""" self.config_tester.run_common_tests() def A (self ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase__ ) @require_tokenizers @require_tf class _UpperCamelCase ( unittest.TestCase): __lowerCamelCase = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" ] __lowerCamelCase = "facebook/blenderbot_small-90M" @cached_property def A (self ): """simple docstring""" # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) @cached_property def A (self ): """simple docstring""" A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def A (self ): """simple docstring""" A__ = self.tokenizer(self.src_text , return_tensors="""tf""" ) A__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCamelCase__ , ) A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCamelCase__ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
574
0
'''simple docstring''' from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata UpperCAmelCase_ = "" if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"): class __lowercase ( tr.AbstractTransform ): def __init__( self , UpperCamelCase = " " ) -> Tuple: __a = sentence_delimiter def UpperCamelCase__ ( self , UpperCamelCase ) -> Union[str, Any]: return list(UpperCamelCase ) def UpperCamelCase__ ( self , UpperCamelCase ) -> int: __a = [] for sent_idx, sentence in enumerate(UpperCamelCase ): chars.extend(self.process_string(UpperCamelCase ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCamelCase ) - 1: chars.append(self.sentence_delimiter ) return chars UpperCAmelCase_ = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: UpperCAmelCase_ = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) UpperCAmelCase_ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" UpperCAmelCase_ = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. 
Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n" UpperCAmelCase_ = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def UpperCamelCase__ ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[ 'https://en.wikipedia.org/wiki/Word_error_rate', 
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates', ] , ) def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ) -> int: if concatenate_texts: return jiwer.compute_measures( UpperCamelCase , UpperCamelCase , truth_transform=UpperCamelCase , hypothesis_transform=UpperCamelCase , )["wer"] __a = 0 __a = 0 for prediction, reference in zip(UpperCamelCase , UpperCamelCase ): __a = jiwer.compute_measures( UpperCamelCase , UpperCamelCase , truth_transform=UpperCamelCase , hypothesis_transform=UpperCamelCase , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
490
'''simple docstring''' import os from datetime import datetime as dt from github import Github UpperCAmelCase_ = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def SCREAMING_SNAKE_CASE ( ): __a = Github(os.environ['GITHUB_TOKEN'] ) __a = g.get_repo('huggingface/diffusers' ) __a = repo.get_issues(state='open' ) for issue in open_issues: __a = sorted(issue.get_comments() , key=lambda a_ : i.created_at , reverse=a_ ) __a = comments[0] if len(a_ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
490
1
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : str = logging.get_logger(__name__) a__ : Optional[Any] = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class lowercase ( __lowercase ): """simple docstring""" snake_case_ = 'detr' snake_case_ = ['past_key_values'] snake_case_ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Tuple , a_ : Union[str, Any]=True , a_ : Dict=None , a_ : Union[str, Any]=3 , a_ : Dict=1_00 , a_ : Union[str, Any]=6 , a_ : Optional[Any]=20_48 , a_ : Optional[Any]=8 , a_ : List[Any]=6 , a_ : List[Any]=20_48 , a_ : str=8 , a_ : Union[str, Any]=0.0 , a_ : Optional[Any]=0.0 , a_ : Union[str, Any]=True , a_ : Dict="relu" , a_ : Tuple=2_56 , a_ : List[Any]=0.1 , a_ : Optional[int]=0.0 , a_ : Optional[int]=0.0 , a_ : Union[str, Any]=0.0_2 , a_ : Union[str, Any]=1.0 , a_ : str=False , a_ : int="sine" , a_ : Dict="resnet50" , a_ : Tuple=True , a_ : int=False , a_ : str=1 , a_ : Optional[int]=5 , a_ : Any=2 , a_ : str=1 , a_ : Optional[int]=1 , a_ : Tuple=5 , a_ : Any=2 , a_ : Union[str, Any]=0.1 , **a_ : Tuple , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) lowerCamelCase__ = CONFIG_MAPPING["resnet"](out_features=["""stage4"""] ) elif isinstance(a_ , a_ ): lowerCamelCase__ = backbone_config.get("""model_type""" ) lowerCamelCase__ = CONFIG_MAPPING[backbone_model_type] lowerCamelCase__ = config_class.from_dict(a_ ) # set timm attributes to None lowerCamelCase__ = None, None, None lowerCamelCase__ = use_timm_backbone lowerCamelCase__ = backbone_config lowerCamelCase__ = num_channels lowerCamelCase__ = num_queries lowerCamelCase__ = d_model lowerCamelCase__ = encoder_ffn_dim lowerCamelCase__ = encoder_layers lowerCamelCase__ = encoder_attention_heads lowerCamelCase__ = decoder_ffn_dim lowerCamelCase__ = decoder_layers lowerCamelCase__ = decoder_attention_heads lowerCamelCase__ = dropout lowerCamelCase__ = attention_dropout lowerCamelCase__ = activation_dropout lowerCamelCase__ = activation_function lowerCamelCase__ = init_std lowerCamelCase__ = init_xavier_std lowerCamelCase__ = encoder_layerdrop lowerCamelCase__ = decoder_layerdrop lowerCamelCase__ = encoder_layers lowerCamelCase__ = auxiliary_loss lowerCamelCase__ = position_embedding_type lowerCamelCase__ = backbone lowerCamelCase__ = use_pretrained_backbone lowerCamelCase__ = dilation # Hungarian matcher lowerCamelCase__ = class_cost lowerCamelCase__ = bbox_cost lowerCamelCase__ = giou_cost # Loss coefficients lowerCamelCase__ = mask_loss_coefficient lowerCamelCase__ = dice_loss_coefficient lowerCamelCase__ = bbox_loss_coefficient lowerCamelCase__ = giou_loss_coefficient lowerCamelCase__ = eos_coefficient super().__init__(is_encoder_decoder=a_ , **a_ ) @property def _UpperCamelCase ( self : Optional[int] ): """simple docstring""" return self.encoder_attention_heads @property def _UpperCamelCase ( self : List[Any] ): """simple docstring""" return self.d_model @classmethod def _UpperCamelCase ( cls : str , a_ : PretrainedConfig , **a_ : List[str] ): """simple docstring""" return cls(backbone_config=a_ , 
**a_ ) def _UpperCamelCase ( self : List[Any] ): """simple docstring""" lowerCamelCase__ = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCamelCase__ = self.backbone_config.to_dict() lowerCamelCase__ = self.__class__.model_type return output class lowercase ( __lowercase ): """simple docstring""" snake_case_ = version.parse('1.11' ) @property def _UpperCamelCase ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def _UpperCamelCase ( self : List[Any] ): """simple docstring""" return 1e-5 @property def _UpperCamelCase ( self : Optional[Any] ): """simple docstring""" return 12
165
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> int: '''simple docstring''' __UpperCamelCase : Tuple = 1 for i in range(1 , num + 1): fact *= i return fact def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> int: '''simple docstring''' __UpperCamelCase : Dict = 0 while number > 0: __UpperCamelCase : List[Any] = number % 10 sum_of_digits += last_digit __UpperCamelCase : List[Any] = number // 10 # Removing the last_digit from the given number return sum_of_digits def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 100) -> int: '''simple docstring''' __UpperCamelCase : Optional[Any] = factorial(_lowerCamelCase) __UpperCamelCase : Tuple = split_and_add(_lowerCamelCase) return result if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
557
0
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __magic_name__ : def __init__( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int]=13 , lowerCamelCase__ : Any=7 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=99 , lowerCamelCase__ : Optional[int]=64 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : Optional[int]=5 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Optional[Any]=512 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : Tuple=0.02 , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Any=None , ) -> List[str]: '''simple docstring''' UpperCamelCase__ : str = parent UpperCamelCase__ : List[Any] = batch_size UpperCamelCase__ : Dict = seq_length UpperCamelCase__ : Optional[Any] = is_training UpperCamelCase__ : Any = use_input_mask UpperCamelCase__ : List[Any] = use_token_type_ids UpperCamelCase__ : str = use_labels 
UpperCamelCase__ : List[str] = vocab_size UpperCamelCase__ : List[str] = hidden_size UpperCamelCase__ : Union[str, Any] = embedding_size UpperCamelCase__ : int = num_hidden_layers UpperCamelCase__ : str = num_attention_heads UpperCamelCase__ : Any = intermediate_size UpperCamelCase__ : Dict = hidden_act UpperCamelCase__ : Optional[int] = hidden_dropout_prob UpperCamelCase__ : int = attention_probs_dropout_prob UpperCamelCase__ : str = max_position_embeddings UpperCamelCase__ : List[Any] = type_vocab_size UpperCamelCase__ : str = type_sequence_label_size UpperCamelCase__ : Dict = initializer_range UpperCamelCase__ : Optional[Any] = num_labels UpperCamelCase__ : Optional[Any] = num_choices UpperCamelCase__ : Union[str, Any] = scope def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ : str = None if self.use_input_mask: UpperCamelCase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ : str = None if self.use_token_type_ids: UpperCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ : int = None UpperCamelCase__ : Optional[Any] = None UpperCamelCase__ : Optional[Any] = None if self.use_labels: UpperCamelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] ) -> Dict: '''simple docstring''' UpperCamelCase__ : int = MegatronBertModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : List[str] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) UpperCamelCase__ : Optional[int] = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) UpperCamelCase__ : int = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ : List[str] = MegatronBertForMaskedLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : 
Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> str: '''simple docstring''' UpperCamelCase__ : Any = MegatronBertForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : Optional[int] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' UpperCamelCase__ : str = MegatronBertForNextSentencePrediction(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : Tuple = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Union[str, Any] = MegatronBertForPreTraining(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : Optional[int] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , next_sentence_label=lowerCamelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ : Dict = MegatronBertForQuestionAnswering(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : int = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Dict = self.num_labels UpperCamelCase__ : Any = MegatronBertForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : Any = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any ) -> int: '''simple docstring''' UpperCamelCase__ : List[str] = self.num_labels UpperCamelCase__ : str = MegatronBertForTokenClassification(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : int = 
model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : Optional[Any] = self.num_choices UpperCamelCase__ : Tuple = MegatronBertForMultipleChoice(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ : Any = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : str = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) : Any = config_and_inputs UpperCamelCase__ : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __magic_name__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase): A: List[Any] = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, 
MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) A: Optional[Any] = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) A: Tuple = True # test_resize_embeddings = False A: List[str] = False def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any]=False ) -> List[Any]: '''simple docstring''' UpperCamelCase__ : List[Any] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) if return_labels: if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ : Dict = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase__ ) UpperCamelCase__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) return inputs_dict def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' UpperCamelCase__ : int = MegatronBertModelTester(self ) UpperCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Dict ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*lowerCamelCase__ ) def UpperCAmelCase__ ( self : int ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : List[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCamelCase__ ) def UpperCAmelCase__ ( self : int ) -> List[Any]: '''simple docstring''' UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCamelCase__ ) def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCamelCase__ ) def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCamelCase__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCamelCase__ ) def _a ( SCREAMING_SNAKE_CASE : Any ): """simple docstring""" return torch.tensor( SCREAMING_SNAKE_CASE , dtype=torch.long , device=SCREAMING_SNAKE_CASE , ) __UpperCamelCase : Dict = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): @slow @unittest.skip('''Model is not available.''' ) def UpperCAmelCase__ ( 
self : Any ) -> Dict: '''simple docstring''' UpperCamelCase__ : Any = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: UpperCamelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , lowerCamelCase__ ) UpperCamelCase__ : Dict = MegatronBertModel.from_pretrained(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.half() UpperCamelCase__ : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): UpperCamelCase__ : Any = model(lowerCamelCase__ )[0] UpperCamelCase__ : int = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , lowerCamelCase__ ) UpperCamelCase__ : List[str] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): UpperCamelCase__ : Optional[Any] = output[0, ii, jj] UpperCamelCase__ : Dict = expected[3 * ii + jj] UpperCamelCase__ : str = '''ii={} jj={} a={} b={}'''.format(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) self.assertTrue(math.isclose(lowerCamelCase__ , lowerCamelCase__ , rel_tol=lowerCamelCase__ , abs_tol=lowerCamelCase__ ) , msg=lowerCamelCase__ )
106
import functools def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ): """simple docstring""" UpperCamelCase__ : str = len(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE ) @functools.cache def min_distance(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCamelCase__ : Tuple = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , SCREAMING_SNAKE_CASE ) , 1 + min_distance(SCREAMING_SNAKE_CASE , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
106
1
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase_ = logging.get_logger(__name__) # General docstring UpperCamelCase_ = """RegNetConfig""" # Base docstring UpperCamelCase_ = """facebook/regnet-y-040""" UpperCamelCase_ = [1, 1088, 7, 7] # Image classification docstring UpperCamelCase_ = """facebook/regnet-y-040""" UpperCamelCase_ = """tabby, tabby cat""" UpperCamelCase_ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , ): '''simple docstring''' super().__init__() lowercase : Tuple =nn.Convad( UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , stride=UpperCAmelCase__ , padding=kernel_size // 2 , groups=UpperCAmelCase__ , bias=UpperCAmelCase__ , ) lowercase : Optional[int] =nn.BatchNormad(UpperCAmelCase__ ) lowercase : Any =ACTaFN[activation] if activation is not None else nn.Identity() def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Any ): '''simple docstring''' lowercase : List[Any] =self.convolution(UpperCAmelCase__ ) lowercase : int =self.normalization(UpperCAmelCase__ ) lowercase : Tuple =self.activation(UpperCAmelCase__ ) return hidden_state class 
__SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : str , UpperCAmelCase__ : RegNetConfig ): '''simple docstring''' super().__init__() lowercase : Tuple =RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) lowercase : Optional[int] =config.num_channels def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[Any] =pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) lowercase : List[Any] =self.embedder(UpperCAmelCase__ ) return hidden_state class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 ): '''simple docstring''' super().__init__() lowercase : Tuple =nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , stride=UpperCAmelCase__ , bias=UpperCAmelCase__ ) lowercase : List[Any] =nn.BatchNormad(UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Tensor ): '''simple docstring''' lowercase : List[str] =self.convolution(UpperCAmelCase__ ) lowercase : Any =self.normalization(UpperCAmelCase__ ) return hidden_state class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ): '''simple docstring''' super().__init__() lowercase : Union[str, Any] =nn.AdaptiveAvgPoolad((1, 1) ) lowercase : Dict =nn.Sequential( nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 ) , nn.Sigmoid() , ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[Any] ): '''simple docstring''' # b c h w -> b c 1 1 lowercase : int =self.pooler(UpperCAmelCase__ ) lowercase : List[str] =self.attention(UpperCAmelCase__ ) lowercase 
: List[str] =hidden_state * attention return hidden_state class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : str , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 ): '''simple docstring''' super().__init__() lowercase : Any =in_channels != out_channels or stride != 1 lowercase : int =max(1 , out_channels // config.groups_width ) lowercase : Optional[int] =( RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) lowercase : Tuple =nn.Sequential( RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ ) , ) lowercase : str =ACTaFN[config.hidden_act] def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : str =hidden_state lowercase : int =self.layer(UpperCAmelCase__ ) lowercase : Any =self.shortcut(UpperCAmelCase__ ) hidden_state += residual lowercase : Union[str, Any] =self.activation(UpperCAmelCase__ ) return hidden_state class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 ): '''simple docstring''' super().__init__() lowercase : str =in_channels != out_channels or stride != 1 lowercase : Optional[int] =max(1 , out_channels // config.groups_width ) lowercase : str =( RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) lowercase : Union[str, Any] =nn.Sequential( RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , 
RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act ) , RegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ ) , ) lowercase : Union[str, Any] =ACTaFN[config.hidden_act] def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Dict =hidden_state lowercase : Any =self.layer(UpperCAmelCase__ ) lowercase : Dict =self.shortcut(UpperCAmelCase__ ) hidden_state += residual lowercase : str =self.activation(UpperCAmelCase__ ) return hidden_state class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Dict , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ): '''simple docstring''' super().__init__() lowercase : Union[str, Any] =RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer lowercase : List[Any] =nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , ) , *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) for _ in range(depth - 1 )] , ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[Any] =self.layers(UpperCAmelCase__ ) return hidden_state class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : int , UpperCAmelCase__ : RegNetConfig ): '''simple docstring''' super().__init__() lowercase : str =nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) lowercase : 
List[str] =zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(UpperCAmelCase__ , config.depths[1:] ): self.stages.append(RegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ ) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ): '''simple docstring''' lowercase : List[str] =() if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase : List[Any] =hidden_states + (hidden_state,) lowercase : int =stage_module(UpperCAmelCase__ ) if output_hidden_states: lowercase : Tuple =hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ ) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = RegNetConfig lowerCamelCase_ = 'regnet' lowerCamelCase_ = 'pixel_values' lowerCamelCase_ = True def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ): '''simple docstring''' if isinstance(UpperCAmelCase__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(UpperCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int]=False ): '''simple docstring''' if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : int =value UpperCamelCase_ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. 
Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCamelCase_ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , lowercase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Any , UpperCAmelCase__ : List[Any] ): '''simple docstring''' super().__init__(UpperCAmelCase__ ) lowercase : str =config lowercase : Any =RegNetEmbeddings(UpperCAmelCase__ ) lowercase : Union[str, Any] =RegNetEncoder(UpperCAmelCase__ ) lowercase : Any =nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None ): '''simple docstring''' lowercase : str =( output_hidden_states if output_hidden_states is not None else 
self.config.output_hidden_states ) lowercase : Dict =return_dict if return_dict is not None else self.config.use_return_dict lowercase : Tuple =self.embedder(UpperCAmelCase__ ) lowercase : Optional[Any] =self.encoder( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) lowercase : Optional[int] =encoder_outputs[0] lowercase : Optional[int] =self.pooler(UpperCAmelCase__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , lowercase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : str , UpperCAmelCase__ : Any ): '''simple docstring''' super().__init__(UpperCAmelCase__ ) lowercase : Dict =config.num_labels lowercase : Union[str, Any] =RegNetModel(UpperCAmelCase__ ) # classification head lowercase : Optional[int] =nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.LongTensor] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , ): '''simple docstring''' lowercase : Dict =return_dict if 
return_dict is not None else self.config.use_return_dict lowercase : Union[str, Any] =self.regnet(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) lowercase : int =outputs.pooler_output if return_dict else outputs[1] lowercase : Optional[int] =self.classifier(UpperCAmelCase__ ) lowercase : Any =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase : Optional[int] ='''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase : List[str] ='''single_label_classification''' else: lowercase : int ='''multi_label_classification''' if self.config.problem_type == "regression": lowercase : str =MSELoss() if self.num_labels == 1: lowercase : Union[str, Any] =loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase : str =loss_fct(UpperCAmelCase__ , UpperCAmelCase__ ) elif self.config.problem_type == "single_label_classification": lowercase : Tuple =CrossEntropyLoss() lowercase : Union[str, Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase : Optional[int] =BCEWithLogitsLoss() lowercase : List[Any] =loss_fct(UpperCAmelCase__ , UpperCAmelCase__ ) if not return_dict: lowercase : str =(logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
92
# NOTE(review): machine-collapsed source. This is the CUAD (Contract Understanding
# Atticus Dataset) metric wrapper for `datasets`: `_info` declares the predictions /
# references feature schema, and `_compute` reshapes references into the SQuAD-style
# nested dict expected by the bundled `evaluate` script and returns its score dict
# (EM, F1, AUPR, precision@80/90% recall). The embedded citation/description strings
# are runtime values and must stay byte-identical. Left unchanged.
'''simple docstring''' import datasets from .evaluate import evaluate _lowerCAmelCase = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n" _lowerCAmelCase = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n" _lowerCAmelCase = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The 
buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): """simple docstring""" def snake_case_( self )-> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': { '''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ), }, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , ) def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> Optional[int]: lowercase__ = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions} lowercase__ = [ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] lowercase__ = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase ) return score
161
0
from PIL import Image def _A ( lowerCamelCase , lowerCamelCase ): def brightness(lowerCamelCase ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("level must be between -255.0 (black) and 255.0 (white)" ) return img.point(lowerCamelCase ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
629
# NOTE(review): machine-collapsed source. Two complete classes: a MobileNetV2
# PretrainedConfig subclass (model_type "mobilenet_v2"; validates that
# depth_multiplier > 0 and stores the architecture hyper-parameters), and its
# OnnxConfig companion declaring pixel_values input, task-dependent outputs, and a
# 1e-4 validation tolerance. No behavioral defect visible; left byte-identical
# because identifier mangling makes a restyle risky.
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : Optional[int] = """mobilenet_v2""" def __init__( self , snake_case=3 , snake_case=224 , snake_case=1.0 , snake_case=8 , snake_case=8 , snake_case=6 , snake_case=32 , snake_case=True , snake_case=True , snake_case="relu6" , snake_case=True , snake_case=0.8 , snake_case=0.02 , snake_case=0.001 , snake_case=255 , **snake_case , ) -> int: """simple docstring""" super().__init__(**snake_case ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." 
) a__ : str = num_channels a__ : Dict = image_size a__ : Any = depth_multiplier a__ : str = depth_divisible_by a__ : Optional[int] = min_depth a__ : Dict = expand_ratio a__ : str = output_stride a__ : Optional[int] = first_layer_is_expansion a__ : Union[str, Any] = finegrained_output a__ : Union[str, Any] = hidden_act a__ : str = tf_padding a__ : List[Any] = classifier_dropout_prob a__ : List[Any] = initializer_range a__ : Optional[Any] = layer_norm_eps a__ : str = semantic_loss_ignore_index class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : Any = version.parse("""1.11""" ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _snake_case ( self ) -> float: """simple docstring""" return 1E-4
629
1
# NOTE(review): machine-collapsed source. A complete unittest.TestCase for
# transformers.activations: compares gelu_python vs. the torch builtin, checks
# gelu_10 clips at 10.0, verifies get_activation() accepts every known key and
# raises KeyError/errors on unknown ones, and checks that activation instances
# returned by get_activation() do not share attribute state. Left byte-identical.
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowercase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) UpperCamelCase__ :int = get_activation('''gelu''' ) self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) ) self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) UpperCamelCase__ :Union[str, Any] = get_activation('''gelu''' ) UpperCamelCase__ :Tuple = get_activation('''gelu_10''' ) UpperCamelCase__ :Union[str, Any] = torch_builtin(UpperCamelCase_ ) UpperCamelCase__ :Dict = geluaa(UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCAmelCase__ ( self ): '''simple docstring''' get_activation('''gelu''' ) get_activation('''gelu_10''' ) get_activation('''gelu_fast''' ) get_activation('''gelu_new''' ) get_activation('''gelu_python''' ) get_activation('''gelu_pytorch_tanh''' ) get_activation('''linear''' ) get_activation('''mish''' ) get_activation('''quick_gelu''' ) get_activation('''relu''' ) get_activation('''sigmoid''' ) get_activation('''silu''' ) get_activation('''swish''' ) get_activation('''tanh''' ) with self.assertRaises(UpperCamelCase_ ): get_activation('''bogus''' ) with self.assertRaises(UpperCamelCase_ ): get_activation(UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ 
:str = get_activation('''gelu''' ) UpperCamelCase__ :Optional[Any] = 1 UpperCamelCase__ :Union[str, Any] = get_activation('''gelu''' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(UpperCamelCase_ ): UpperCamelCase__ :Tuple = acta.a
189
'''simple docstring''' import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--txt2img_unclip''', default='''kakaobrain/karlo-v1-alpha''', type=str, required=False, help='''The pretrained txt2img unclip.''', ) __snake_case = parser.parse_args() __snake_case = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) __snake_case = CLIPImageProcessor() __snake_case = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''') __snake_case = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
189
1
# NOTE(review): machine-collapsed source. This is diffusers' auto-generated
# "dummy flax objects" module: placeholder classes whose constructor and
# from_config/from_pretrained classmethods all raise via requires_backends(...,
# ["flax"]) when flax is not installed. The transformation collapsed every class
# name to the same identifier `_UpperCamelCase`, so at runtime only the LAST class
# definition survives under that name — the original distinct Flax* names are not
# recoverable from this view. Left byte-identical.
from ..utils import DummyObject, requires_backends class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : Optional[Any] = ["""flax"""] def __init__( self : Any , *snake_case_ : Union[str, Any] , **snake_case_ : Dict ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : str , *snake_case_ : Optional[Any] , **snake_case_ : List[Any] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : str , *snake_case_ : int , **snake_case_ : int ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : Optional[int] = ["""flax"""] def __init__( self : Tuple , *snake_case_ : Any , **snake_case_ : Union[str, Any] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : str , *snake_case_ : str , **snake_case_ : str ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Any , *snake_case_ : Dict , **snake_case_ : Optional[Any] ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : List[Any] = ["""flax"""] def __init__( self : int , *snake_case_ : int , **snake_case_ : Optional[int] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Dict , *snake_case_ : Any , **snake_case_ : List[str] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Union[str, Any] , *snake_case_ : Optional[int] , **snake_case_ : List[Any] ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : List[str] = ["""flax"""] def __init__( self : str , *snake_case_ : List[Any] , **snake_case_ : int ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Optional[Any] , *snake_case_ : str , **snake_case_ : Optional[int] ): requires_backends(cls , ["""flax"""] ) @classmethod def 
lowerCAmelCase__ ( cls : str , *snake_case_ : List[str] , **snake_case_ : List[Any] ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : List[str] = ["""flax"""] def __init__( self : int , *snake_case_ : List[str] , **snake_case_ : List[str] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Optional[Any] , *snake_case_ : str , **snake_case_ : Dict ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Union[str, Any] , *snake_case_ : List[Any] , **snake_case_ : List[Any] ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : Tuple = ["""flax"""] def __init__( self : Optional[Any] , *snake_case_ : List[Any] , **snake_case_ : Tuple ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : int , *snake_case_ : Optional[int] , **snake_case_ : Any ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : int , *snake_case_ : int , **snake_case_ : Union[str, Any] ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : Union[str, Any] = ["""flax"""] def __init__( self : List[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[int] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Tuple , *snake_case_ : Union[str, Any] , **snake_case_ : str ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : int , *snake_case_ : List[str] , **snake_case_ : Optional[int] ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : Union[str, Any] = ["""flax"""] def __init__( self : Dict , *snake_case_ : int , **snake_case_ : List[Any] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Tuple , 
*snake_case_ : str , **snake_case_ : str ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Optional[Any] , *snake_case_ : List[Any] , **snake_case_ : Union[str, Any] ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : List[Any] = ["""flax"""] def __init__( self : Tuple , *snake_case_ : str , **snake_case_ : int ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : List[Any] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Tuple , *snake_case_ : List[str] , **snake_case_ : Optional[int] ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : Dict = ["""flax"""] def __init__( self : str , *snake_case_ : Optional[int] , **snake_case_ : Any ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : List[str] , *snake_case_ : Optional[int] , **snake_case_ : Optional[int] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : List[Any] , *snake_case_ : str , **snake_case_ : Tuple ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : List[str] = ["""flax"""] def __init__( self : Union[str, Any] , *snake_case_ : Optional[Any] , **snake_case_ : Optional[int] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : int , *snake_case_ : List[Any] , **snake_case_ : List[Any] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Union[str, Any] , *snake_case_ : str , **snake_case_ : Any ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : Any = ["""flax"""] def __init__( self : Union[str, Any] , *snake_case_ : str , **snake_case_ : 
Optional[Any] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[int] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Optional[int] , *snake_case_ : int , **snake_case_ : int ): requires_backends(cls , ["""flax"""] ) class _UpperCamelCase ( metaclass=_A ): '''simple docstring''' __UpperCamelCase : List[str] = ["""flax"""] def __init__( self : List[str] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Optional[int] , *snake_case_ : str , **snake_case_ : int ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase__ ( cls : Optional[int] , *snake_case_ : List[Any] , **snake_case_ : Optional[Any] ): requires_backends(cls , ["""flax"""] )
708
# NOTE(review): machine-collapsed source. A complete slow tokenizer test suite for
# BarthezTokenizer/BarthezTokenizerFast (moussaKam/mbarthez): special-token id
# mapping, vocab size (101122), torch batch encoding shapes, slow-vs-fast parity on
# accented French text, and a pinned integration encoding. The long `fmt: off`
# token-id fixture is data and must stay byte-identical. Left unchanged.
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _UpperCamelCase ( _A , unittest.TestCase ): '''simple docstring''' __UpperCamelCase : Optional[int] = BarthezTokenizer __UpperCamelCase : str = BarthezTokenizerFast __UpperCamelCase : str = True __UpperCamelCase : List[Any] = True def lowerCAmelCase__ ( self : Optional[int] ): super().setUp() UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ ) UpperCamelCase_: Dict = tokenizer def lowerCAmelCase__ ( self : List[str] ): UpperCamelCase_: str = """<pad>""" UpperCamelCase_: int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowerCAmelCase__ ( self : Dict ): UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(snake_case_ ) , 10_1122 ) def lowerCAmelCase__ ( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 ) @require_torch def lowerCAmelCase__ ( self : int ): UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2] UpperCamelCase_: Union[str, Any] = self.tokenizer( snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" ) 
self.assertIsInstance(snake_case_ , snake_case_ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) UpperCamelCase_: Any = batch.input_ids.tolist()[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowerCAmelCase__ ( self : Any ): if not self.test_rust_tokenizer: return UpperCamelCase_: Optional[Any] = self.get_tokenizer() UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer() UpperCamelCase_: str = """I was born in 92000, and this is falsé.""" UpperCamelCase_: str = tokenizer.tokenize(snake_case_ ) UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) UpperCamelCase_: List[str] = self.get_rust_tokenizer() UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ ) UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) @slow def lowerCAmelCase__ ( self : int ): # fmt: off UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. UpperCamelCase_: str = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
670
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ : List[Any] = { """configuration_x_clip""": [ """XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XCLIPConfig""", """XCLIPTextConfig""", """XCLIPVisionConfig""", ], """processing_x_clip""": ["""XCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Optional[Any] = [ """XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """XCLIPModel""", """XCLIPPreTrainedModel""", """XCLIPTextModel""", """XCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union lowercase : int = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class a__ : _A = 42 _A = None _A = None _A = None _A = None def lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_: Union[str, Any] = _str_to_version_tuple(self.version_str ) def __repr__( self : Tuple ) -> Any: """simple docstring""" return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}""" @property def lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" return self.major, self.minor, self.patch def lowerCAmelCase ( self : Dict , A_ : List[str] ) -> Optional[int]: """simple docstring""" if isinstance(A_ , A_ ): return Version(A_ ) elif isinstance(A_ , A_ ): return other raise TypeError(f"""{other} (type {type(A_ )}) cannot be compared to version.""" ) def __eq__( self : List[Any] , A_ : Optional[Any] ) -> Dict: """simple docstring""" try: lowerCamelCase_: Any = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : str , A_ : Union[str, Any] ) -> str: """simple docstring""" lowerCamelCase_: Union[str, Any] = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : str ) -> List[Any]: """simple docstring""" return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowerCAmelCase ( cls : Union[str, Any] , A_ : List[str] ) -> Optional[int]: """simple docstring""" lowerCamelCase_: Optional[Any] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowerCAmelCase ( self : int ) -> str: """simple docstring""" return self.version_str def UpperCAmelCase_ ( _UpperCAmelCase ): lowerCamelCase_: Optional[int] = _VERSION_REG.match(_UpperCAmelCase ) 
if not res: raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" ) return tuple(int(_UpperCAmelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] ) def UpperCAmelCase_ ( _UpperCAmelCase ): return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
423
0
# NOTE(review): machine-collapsed source. A complete diffusers KarrasVe pipeline:
# samples x_0 ~ N(0, sigma_0^2 I), then per timestep adds noise (sigma_hat), predicts
# the residual, takes an Euler step, and applies a 2nd-order correction when
# sigma_prev != 0, finally clamping to [0, 1] and optionally converting to PIL.
# Numerically intricate and diffusers-coupled; left byte-identical.
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __magic_name__ ( UpperCAmelCase_): UpperCamelCase__ = 42 UpperCamelCase__ = 42 def __init__( self : Any , lowercase_ : List[str] , lowercase_ : Tuple ): super().__init__() self.register_modules(unet=_lowercase , scheduler=_lowercase ) @torch.no_grad() def __call__( self : Union[str, Any] , lowercase_ : Any = 1 , lowercase_ : int = 50 , lowercase_ : str = None , lowercase_ : str = "pil" , lowercase_ : Optional[Any] = True , **lowercase_ : str , ): lowercase_ : int = self.unet.config.sample_size lowercase_ : Optional[Any] = (batch_size, 3, img_size, img_size) lowercase_ : Union[str, Any] = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(_lowercase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper lowercase_ : Any = self.scheduler.schedule[t] lowercase_ : Any = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat lowercase_ : Union[str, Any] = self.scheduler.add_noise_to_input(_lowercase , _lowercase , generator=_lowercase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. lowercase_ : Optional[int] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev lowercase_ : Optional[Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase , _lowercase ) if sigma_prev != 0: # 6. 
Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. lowercase_ : Dict = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample lowercase_ : Tuple = self.scheduler.step_correct( _lowercase , _lowercase , _lowercase , _lowercase , step_output.prev_sample , step_output["""derivative"""] , ) lowercase_ : Dict = step_output.prev_sample lowercase_ : Tuple = (sample / 2 + 0.5).clamp(0 , 1 ) lowercase_ : Tuple = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase_ : Optional[int] = self.numpy_to_pil(_lowercase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowercase )
707
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
0
'''simple docstring''' import os def lowerCAmelCase_ ( ): with open(os.path.dirname(UpperCamelCase_ ) + """/grid.txt""" ) as f: __SCREAMING_SNAKE_CASE : Union[str, Any] = [] # noqa: E741 for _ in range(20 ): l.append([int(UpperCamelCase_ ) for x in f.readline().split()] ) __SCREAMING_SNAKE_CASE : int = 0 # right for i in range(20 ): for j in range(17 ): __SCREAMING_SNAKE_CASE : Tuple = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: __SCREAMING_SNAKE_CASE : str = temp # down for i in range(17 ): for j in range(20 ): __SCREAMING_SNAKE_CASE : str = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: __SCREAMING_SNAKE_CASE : List[str] = temp # diagonal 1 for i in range(17 ): for j in range(17 ): __SCREAMING_SNAKE_CASE : Optional[int] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: __SCREAMING_SNAKE_CASE : Any = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): __SCREAMING_SNAKE_CASE : Optional[int] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: __SCREAMING_SNAKE_CASE : Tuple = temp return maximum if __name__ == "__main__": print(solution())
578
import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _lowercase ( UpperCamelCase_ ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE__ = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def _lowercase ( UpperCamelCase_ ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = emb.weight.shape SCREAMING_SNAKE_CASE__ = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ ) SCREAMING_SNAKE_CASE__ = emb.weight.data return lin_layer def _lowercase ( UpperCamelCase_ , UpperCamelCase_="facebook/mbart-large-en-ro" , UpperCamelCase_=False , UpperCamelCase_=False ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE__ = torch.load(UpperCamelCase_ , map_location='cpu' )['model'] remove_ignore_keys_(UpperCamelCase_ ) SCREAMING_SNAKE_CASE__ = state_dict['encoder.embed_tokens.weight'].shape[0] SCREAMING_SNAKE_CASE__ = MBartConfig.from_pretrained(UpperCamelCase_ , vocab_size=UpperCamelCase_ ) if mbart_aa and finetuned: SCREAMING_SNAKE_CASE__ = 'relu' SCREAMING_SNAKE_CASE__ = state_dict['decoder.embed_tokens.weight'] SCREAMING_SNAKE_CASE__ = MBartForConditionalGeneration(UpperCamelCase_ ) model.model.load_state_dict(UpperCamelCase_ ) if finetuned: SCREAMING_SNAKE_CASE__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem.""" ) parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--hf_config""", default="""facebook/mbart-large-cc25""", type=str, help="""Which huggingface 
architecture to use: mbart-large""", ) parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""") parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""") __snake_case = parser.parse_args() __snake_case = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
472
0
'''simple docstring''' from __future__ import annotations def lowercase__ ( __UpperCamelCase )-> None: create_state_space_tree(__UpperCamelCase , [] , 0 , [0 for i in range(len(__UpperCamelCase ) )] ) def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> None: if index == len(__UpperCamelCase ): print(__UpperCamelCase ) return for i in range(len(__UpperCamelCase ) ): if not index_used[i]: current_sequence.append(sequence[i] ) UpperCamelCase = True create_state_space_tree(__UpperCamelCase , __UpperCamelCase , index + 1 , __UpperCamelCase ) current_sequence.pop() UpperCamelCase = False SCREAMING_SNAKE_CASE__ = [3, 1, 2, 4] generate_all_permutations(sequence) SCREAMING_SNAKE_CASE__ = ["A", "B", "C"] generate_all_permutations(sequence_a)
35
'''simple docstring''' import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class a_ ( lowerCamelCase ): def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """tf_padding""" ) ) self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """depth_multiplier""" ) ) class a_ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.2_5 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , ) -> List[str]: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = depth_multiplier UpperCamelCase = min_depth UpperCamelCase = tf_padding UpperCamelCase = int(last_hidden_size * depth_multiplier ) UpperCamelCase = output_stride UpperCamelCase = hidden_act UpperCamelCase = classifier_dropout_prob UpperCamelCase = use_labels UpperCamelCase 
= is_training UpperCamelCase = num_labels UpperCamelCase = initializer_range UpperCamelCase = scope def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels, pixel_labels def A__ ( self ) -> Optional[Any]: """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" UpperCamelCase = MobileNetVaModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() 
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () lowercase = ( {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = MobileNetVaModelTester(self ) UpperCamelCase = MobileNetVaConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" ) def A__ ( self ) -> int: """simple docstring""" pass @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" ) def A__ ( self ) -> int: """simple docstring""" pass @unittest.skip(reason="""MobileNetV1 does not output attentions""" ) def A__ ( self ) -> Dict: """simple docstring""" pass def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> int: 
"""simple docstring""" def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCamelCase = outputs.hidden_states UpperCamelCase = 26 self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def A__ ( self ) -> Dict: """simple docstring""" for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = MobileNetVaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def lowercase__ ( )-> Optional[Any]: UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class a_ ( unittest.TestCase ): @cached_property def A__ ( self ) -> Dict: """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None ) @slow def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(_SCREAMING_SNAKE_CASE ) UpperCamelCase = 
self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase = model(**_SCREAMING_SNAKE_CASE ) # verify the logits UpperCamelCase = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
35
1
from datetime import datetime import requests def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' __SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(lowercase__ ).content if __name__ == "__main__": __lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip() __lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4""" with open(file_name, 'wb') as fp: fp.write(download_video(url)) print(f"""Done. Video saved to disk as {file_name}.""")
696
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __lowerCAmelCase : int ={ 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_2_8, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 5_0, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 1_0, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 1_0, 'exponential_decay_length_penalty': (5, 1.0_1), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class _lowercase ( unittest.TestCase ): 
'''simple docstring''' @classmethod def __magic_name__( cls :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = TOKEN HfFolder.save_token(lowerCAmelCase__ ) @classmethod def __magic_name__( cls :List[str] ) -> List[str]: try: delete_repo(token=cls._token , repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''' ) except HTTPError: pass def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''test-config''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __magic_name__( self :int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != 
"transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __magic_name__( self :Dict ) -> Optional[int]: CustomConfig.register_for_auto_class() __SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 ) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' ) self.assertEqual(new_config.attribute , 42 ) class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int __SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float __SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool __SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str c.update_from_string( 
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' ) self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' ) self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' ) self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' ) def __magic_name__( self :Dict ) -> str: __SCREAMING_SNAKE_CASE : Dict = PretrainedConfig() __SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] ) __SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )] if len(lowerCAmelCase__ ) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' f''' {', '.join(lowerCAmelCase__ )}.''' ) def __magic_name__( self :Union[str, Any] ) -> List[Any]: with self.assertRaises(lowerCAmelCase__ ): # config is in subfolder, the following should not work without specifying the subfolder __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ) __SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' ) self.assertIsNotNone(lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down __SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock() __SCREAMING_SNAKE_CASE : List[Any] = 
500 __SCREAMING_SNAKE_CASE : Union[str, Any] = {} __SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError __SCREAMING_SNAKE_CASE : str = {} # Download this model to make sure it's in the cache. __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head: __SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() def __magic_name__( self :Union[str, Any] ) -> List[Any]: # This test is for deprecated behavior and can be removed in v5 __SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' ) def __magic_name__( self :str ) -> List[str]: __SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = 2 json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old configuration file as the version of Transformers is < 4.42.0 __SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json'''] __SCREAMING_SNAKE_CASE : Tuple = 768 configuration.save_pretrained(lowerCAmelCase__ ) shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) ) __SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 768 ) def __magic_name__( self :List[str] ) -> Union[str, Any]: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. __SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers __SCREAMING_SNAKE_CASE : int = '''v4.0.0''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained( lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowerCAmelCase__ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers __SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0''' __SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(old_configuration.hidden_size , 768 )
696
1
from collections import defaultdict from math import ceil, sqrt def UpperCamelCase ( __lowercase : int = 1_00_00_00 ,__lowercase : int = 10 ): '''simple docstring''' A_ : defaultdict = defaultdict(__lowercase ) for outer_width in range(3 ,(t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: A_ : Any = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 ) else: A_ : List[Any] = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__lowercase ,outer_width - 1 ,2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F"""{solution() = }""")
70
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : Any = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) 
self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Tuple = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase 
) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[str] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , 
from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase 
) , 1_4_4_1_0 )
70
1
def UpperCAmelCase_ ( __lowerCAmelCase ) -> bool: if not all(x.isalpha() for x in string ): raise ValueError('''String must only contain alphabetic characters.''' ) __lowercase : Optional[Any] = sorted(string.lower() ) return len(__a ) == len(set(__a ) ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = input("Enter a string ").strip() __lowerCAmelCase : Optional[int] = is_isogram(input_str) print(F'{input_str} is {"an" if isogram else "not an"} isogram.')
509
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array: """simple docstring""" _a : int = F"""{sampling_rate}""" _a : str = '''1''' _a : Optional[int] = '''f32le''' _a : Optional[Any] = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process: _a : Any = ffmpeg_process.communicate(__a ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error _a : Optional[Any] = output_stream[0] _a : Optional[int] = np.frombuffer(__a ,np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str: """simple docstring""" _a : Dict = F"""{sampling_rate}""" _a : Optional[Any] = '''1''' if format_for_conversion == "s16le": _a : Dict = 2 elif format_for_conversion == "f32le": _a : Optional[Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) _a : Dict = platform.system() if system == "Linux": _a : Dict = '''alsa''' _a : Union[str, Any] = '''default''' elif system == "Darwin": _a : Union[str, Any] = '''avfoundation''' _a : List[str] = ''':0''' elif system == "Windows": _a : Optional[int] = '''dshow''' _a : str = '''default''' _a : Tuple = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] _a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _a : str = _ffmpeg_stream(__a ,__a ) for item in iterator: yield item def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]: """simple docstring""" if stream_chunk_s is not None: _a : Tuple = stream_chunk_s else: _a : Tuple = chunk_length_s _a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a ) if format_for_conversion == "s16le": _a : Any = np.intaa _a : Optional[int] = 2 elif format_for_conversion == "f32le": _a : Dict = np.floataa _a : List[Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) if stride_length_s is None: _a : List[Any] = chunk_length_s / 6 _a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(__a ,(int, float) ): _a : Optional[Any] = [stride_length_s, stride_length_s] _a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _a : Optional[Any] = datetime.datetime.now() _a : Tuple = datetime.timedelta(seconds=__a ) for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ): # Put everything back in numpy scale _a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a ) _a : Dict = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) _a : str = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Optional[int]: """simple docstring""" _a : Any = b'''''' _a , _a : List[str] = stride if stride_left + stride_right >= chunk_len: raise ValueError( F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) _a : List[str] = 0 for raw in iterator: acc += raw if stream and len(__a ) < chunk_len: _a : Dict = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(__a ) >= chunk_len: # We are flushing the accumulator _a : List[str] = (_stride_left, stride_right) _a : List[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: _a : List[Any] = False yield item _a : Optional[Any] = stride_left _a : Optional[Any] = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(__a ) > stride_left: _a : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: _a : Dict = False yield item def __UpperCAmelCase ( __a : 
int ,__a : int ) -> Tuple: """simple docstring""" _a : Dict = 2**24 # 16Mo try: with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process: while True: _a : int = ffmpeg_process.stdout.read(__a ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
14
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A__ : Union[str, Any] = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Union[str, Any] = [ 'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwinForImageClassification', 'SwinForMaskedImageModeling', 'SwinModel', 'SwinPreTrainedModel', 'SwinBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = [ 'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSwinForImageClassification', 'TFSwinForMaskedImageModeling', 'TFSwinModel', 'TFSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys A__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
711
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowercase__ ( snake_case__ ): _UpperCAmelCase :Tuple = ["image_processor", "tokenizer"] _UpperCAmelCase :Dict = "LayoutLMv3ImageProcessor" _UpperCAmelCase :Any = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self : List[Any] , snake_case__ : str=None , snake_case__ : int=None , **snake_case__ : Dict ): lowerCamelCase_ : List[str] =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , snake_case__ , ) lowerCamelCase_ : List[str] =kwargs.pop("feature_extractor" ) lowerCamelCase_ : Union[str, Any] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." 
) super().__init__(snake_case__ , snake_case__ ) def __call__( self : Optional[Any] , snake_case__ : Any , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , snake_case__ : Union[List[List[int]], List[List[List[int]]]] = None , snake_case__ : Optional[Union[List[int], List[List[int]]]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : int , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) # first, apply the image processor lowerCamelCase_ : Dict =self.image_processor(images=snake_case__ , return_tensors=snake_case__ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(snake_case__ , snake_case__ ): lowerCamelCase_ : List[Any] =[text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase_ : List[str] =features["words"] lowerCamelCase_ : List[str] =self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) # add pixel values lowerCamelCase_ : str =features.pop("pixel_values" ) if return_overflowing_tokens is True: lowerCamelCase_ : Tuple =self.get_overflowing_images(snake_case__ , encoded_inputs["overflow_to_sample_mapping"] ) lowerCamelCase_ : Dict =images return encoded_inputs def UpperCAmelCase__ ( self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Any ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image lowerCamelCase_ : Optional[int] =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(snake_case__ ) != len(snake_case__ ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" F""" {len(snake_case__ )} and {len(snake_case__ )}""" ) return images_with_overflow 
def UpperCAmelCase__ ( self : str , *snake_case__ : List[str] , **snake_case__ : List[str] ): return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase__ ( self : List[Any] , *snake_case__ : Tuple , **snake_case__ : Any ): return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase__ ( self : Optional[Any] ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCAmelCase__ ( self : Union[str, Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case__ , ) return self.image_processor_class @property def UpperCAmelCase__ ( self : List[Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case__ , ) return self.image_processor
244
0
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self : Any ): __A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A ,"tf_padding" ) ) self.parent.assertTrue(hasattr(A ,"depth_multiplier" ) ) class UpperCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] ,A : int ,A : List[Any]=13 ,A : int=3 ,A : Optional[Any]=32 ,A : Union[str, Any]=0.25 ,A : Tuple=8 ,A : Optional[int]=True ,A : Union[str, Any]=10_24 ,A : Any=32 ,A : Optional[int]="relu6" ,A : int=0.1 ,A : Optional[Any]=0.02 ,A : Optional[Any]=True ,A : List[str]=True ,A : str=10 ,A : str=None ,): __A = parent __A = batch_size __A = num_channels __A = image_size __A = depth_multiplier __A = min_depth __A = tf_padding __A = int(last_hidden_size * depth_multiplier ) __A = output_stride __A = hidden_act __A = classifier_dropout_prob __A = use_labels __A = is_training __A = num_labels __A = initializer_range __A = scope def UpperCamelCase_ ( self : Optional[int] ): __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] ,self.num_labels ) __A = 
ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) __A = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self : Any ): return MobileNetVaConfig( num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,) def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : Tuple ,A : Optional[int] ,A : List[str] ): __A = MobileNetVaModel(config=A ) model.to(A ) model.eval() __A = model(A ) self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : List[Any] ,A : int ,A : Union[str, Any] ): __A = self.num_labels __A = MobileNetVaForImageClassification(A ) model.to(A ) model.eval() __A = model(A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Tuple ): __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () snake_case_ = ( {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def UpperCamelCase_ ( self : Any ): __A = MobileNetVaModelTester(self ) __A = MobileNetVaConfigTester(self ,config_class=A ,has_text_modality=A ) def UpperCamelCase_ ( self : str ): 
self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" ) def UpperCamelCase_ ( self : Union[str, Any] ): pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" ) def UpperCamelCase_ ( self : Tuple ): pass @unittest.skip(reason="MobileNetV1 does not output attentions" ) def UpperCamelCase_ ( self : Any ): pass def UpperCamelCase_ ( self : Optional[int] ): __A , __A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A = model_class(A ) __A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A = [*signature.parameters.keys()] __A = ["pixel_values"] self.assertListEqual(arg_names[:1] ,A ) def UpperCamelCase_ ( self : List[Any] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self : Optional[int] ): def check_hidden_states_output(A : List[Any] ,A : List[Any] ,A : Optional[int] ): __A = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): __A = model(**self._prepare_for_class(A ,A ) ) __A = outputs.hidden_states __A = 26 self.assertEqual(len(A ) ,A ) __A , __A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A = True check_hidden_states_output(A ,A ,A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A = True check_hidden_states_output(A ,A ,A ) def UpperCamelCase_ ( self : Tuple ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) @slow def UpperCamelCase_ ( self : Union[str, Any] ): for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = MobileNetVaModel.from_pretrained(A ) self.assertIsNotNone(A ) def UpperCAmelCase ( ) -> str: """simple docstring""" __A = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self : List[str] ): return ( MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Optional[Any] ): __A = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(A ) __A = self.default_image_processor __A = prepare_img() __A = image_processor(images=A ,return_tensors="pt" ).to(A ) # forward pass with torch.no_grad(): __A = model(**A ) # verify the logits __A = torch.Size((1, 10_01) ) self.assertEqual(outputs.logits.shape ,A ) __A = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
55
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : Dict = 'encoder-decoder' lowerCamelCase : Optional[Any] = True def __init__( self: str , **_UpperCAmelCase: int ): super().__init__(**_UpperCAmelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" _lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' ) _lowerCAmelCase :Dict = encoder_config.pop('model_type' ) _lowerCAmelCase :str = kwargs.pop('decoder' ) _lowerCAmelCase :str = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig _lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Any = True @classmethod def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ): logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' ) _lowerCAmelCase :Dict = True _lowerCAmelCase :List[str] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Dict ): _lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowerCAmelCase :Optional[int] = self.encoder.to_dict() _lowerCAmelCase :Union[str, Any] = self.decoder.to_dict() _lowerCAmelCase :List[str] = self.__class__.model_type return output
687
0
"""simple docstring""" import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ="▁" __SCREAMING_SNAKE_CASE ={"vocab_file": "prophetnet.tokenizer"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } __SCREAMING_SNAKE_CASE ={ "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } __SCREAMING_SNAKE_CASE ={ "microsoft/xprophetnet-large-wiki100-cased": 512, } def lowercase__( __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : str = collections.OrderedDict() with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as reader: lowercase_ : Union[str, Any] = reader.readlines() for index, token in enumerate(__SCREAMING_SNAKE_CASE ): lowercase_ : List[str] = token.rstrip('\n' ) lowercase_ : List[str] = index return vocab class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] def __init__( self ,__UpperCamelCase ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None: '''simple docstring''' lowercase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCamelCase ,) try: import sentencepiece as spm except ImportError: 
logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise lowercase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) lowercase_ : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowercase_ : Dict = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in range(10 ): lowercase_ : Optional[Any] = f'''[unused{i}]''' lowercase_ : Dict = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowercase_ : Union[str, Any] = 12 lowercase_ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(__UpperCamelCase ) def __getstate__( self ) -> Any: '''simple docstring''' lowercase_ : Tuple = self.__dict__.copy() lowercase_ : Tuple = None return state def __setstate__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : List[Any] = d try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): lowercase_ : str = {} lowercase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]: '''simple docstring''' if 
already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return ([0] * len(__UpperCamelCase )) + [1] return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : Dict = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' return self.sp_model.encode(__UpperCamelCase ,out_type=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase_ : str = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = ''.join(__UpperCamelCase ).replace(__UpperCamelCase ,' ' ).strip() return out_string def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' 
if not os.path.isdir(__UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ : Tuple = os.path.join( __UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase ,'wb' ) as fi: lowercase_ : Any = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowercase_ : Optional[int] = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
704
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_features'] def __init__( self ,__UpperCamelCase=80 ,__UpperCamelCase=1_6000 ,__UpperCamelCase=160 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=0.0 ,__UpperCamelCase=False ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__( feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Dict = n_fft lowercase_ : Optional[Any] = hop_length lowercase_ : Optional[int] = chunk_length lowercase_ : Optional[Any] = chunk_length * sampling_rate lowercase_ : Tuple = self.n_samples // hop_length lowercase_ : int = sampling_rate lowercase_ : int = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=__UpperCamelCase ,min_frequency=0.0 ,max_frequency=8000.0 ,sampling_rate=__UpperCamelCase ,norm='slaney' ,mel_scale='slaney' ,) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> np.ndarray: '''simple docstring''' lowercase_ : Union[str, Any] = spectrogram( __UpperCamelCase ,window_function(self.n_fft ,'hann' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel='log10' ,) lowercase_ : Optional[Any] = log_spec[:, :-1] lowercase_ : int = np.maximum(__UpperCamelCase ,log_spec.max() - 8.0 ) lowercase_ : List[Any] = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _UpperCAmelCase ( __UpperCamelCase 
,__UpperCamelCase ,__UpperCamelCase = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: lowercase_ : Optional[int] = np.array(__UpperCamelCase ,np.intaa ) lowercase_ : Optional[Any] = [] for vector, length in zip(__UpperCamelCase ,attention_mask.sum(-1 ) ): lowercase_ : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: lowercase_ : Any = padding_value normed_input_values.append(__UpperCamelCase ) else: lowercase_ : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self ,__UpperCamelCase ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = "max_length" ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) lowercase_ : Optional[int] = isinstance(__UpperCamelCase ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowercase_ : Union[str, Any] = is_batched_numpy or ( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : Union[str, Any] = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Dict = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase_ : Dict = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : List[Any] = [np.asarray([raw_speech] ).T] lowercase_ : str = BatchFeature({'input_features': raw_speech} ) # convert into correct format for padding lowercase_ : str = self.pad( __UpperCamelCase ,padding=__UpperCamelCase ,max_length=max_length if max_length else self.n_samples ,truncation=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_attention_mask=return_attention_mask or do_normalize ,) # zero-mean and unit-variance normalization if do_normalize: lowercase_ : int = self.zero_mean_unit_var_norm( padded_inputs['input_features'] ,attention_mask=padded_inputs['attention_mask'] ,padding_value=self.padding_value ,) lowercase_ : Tuple = np.stack(padded_inputs['input_features'] ,axis=0 ) # make sure list is in array format lowercase_ : Dict = padded_inputs.get('input_features' ).transpose(2 ,0 ,1 ) lowercase_ : List[str] = [self._np_extract_fbank_features(__UpperCamelCase ) for waveform in input_features[0]] if isinstance(input_features[0] ,__UpperCamelCase ): lowercase_ : Optional[Any] = [np.asarray(__UpperCamelCase ,dtype=np.floataa ) for feature in input_features] else: lowercase_ : str = input_features if 
return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase_ : str = padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: lowercase_ : Union[str, Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs def _UpperCAmelCase ( self ) -> Dict[str, Any]: '''simple docstring''' lowercase_ : Dict = copy.deepcopy(self.__dict__ ) lowercase_ : Union[str, Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
477
0
"""Unit tests for RealmRetriever: block retrieval, answer-span search, save/load."""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class RealmRetrieverTest(TestCase):
    def setUp(self):
        # Scratch directory that holds both the tokenizer vocab and the block records.
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Minimal WordPiece vocab covering the dummy records used below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        # NOTE: deliberately one more record than num_block_records to exercise slicing.
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        # Block 0 does not contain an answer; blocks 3 and 5 do.
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
78
"""Lazy import structure for the Nezha model.

Exposes the configuration eagerly-by-name and defers the heavy torch-backed
modeling imports until an attribute is actually accessed.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
678
0
'''simple docstring''' from __future__ import annotations import math def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ : Optional[int] = u for i in range(1 , SCREAMING_SNAKE_CASE_ ): lowercase_ : Any = temp * (u - i) return temp def _UpperCamelCase ( ): lowercase_ : Dict = int(input('enter the numbers of values: ' ) ) lowercase_ : list[list[float]] = [] for _ in range(SCREAMING_SNAKE_CASE_ ): y.append([] ) for i in range(SCREAMING_SNAKE_CASE_ ): for j in range(SCREAMING_SNAKE_CASE_ ): y[i].append(SCREAMING_SNAKE_CASE_ ) lowercase_ : str = 0 print('enter the values of parameters in a list: ' ) lowercase_ : Any = list(map(SCREAMING_SNAKE_CASE_ , input().split() ) ) print('enter the values of corresponding parameters: ' ) for i in range(SCREAMING_SNAKE_CASE_ ): lowercase_ : List[str] = float(input() ) lowercase_ : Dict = int(input('enter the value to interpolate: ' ) ) lowercase_ : Any = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , SCREAMING_SNAKE_CASE_ ): for j in range(n - i ): lowercase_ : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1] lowercase_ : Union[str, Any] = y[0][0] for i in range(1 , SCREAMING_SNAKE_CASE_ ): summ += (ucal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE_ ) print(f'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
438
"""Compute pi to arbitrary precision using the Chudnovsky algorithm."""
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Return the first `precision` significant digits of pi as a string.

    Args:
        precision: number of significant digits to compute (>= 1).

    Raises:
        TypeError: if `precision` is not an int.
        ValueError: if `precision` is smaller than 1.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    # Each Chudnovsky term contributes roughly 14 correct digits.
    num_iterations = ceil(precision / 14)

    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term

    # Drop the last (possibly rounded) character of the decimal expansion.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
438
1
"""Minimal Slack incoming-webhook client."""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST `message_body` as JSON to the Slack incoming webhook `slack_url`.

    Raises:
        ValueError: if Slack responds with any status code other than 200.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
639
"""Lazy import structure for the LLaMA model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
639
1
"""Tests for SplitDict YAML round-tripping and backward-compatible asdict output."""
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
406
"""Convert a T5 TensorFlow checkpoint into a PyTorch checkpoint."""
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build a T5 model from `config_file`, load the TF weights, and save it to `pytorch_dump_path`."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
406
1
"""Tests for the PyTorch MobileViT model."""
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    """Builds small MobileViT configs/inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
81
"""ConvBERT model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration for ConvBERT models; defaults mirror YituTech/conv-bert-base."""

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        # ConvBERT-specific: ratio of heads replaced by span-based dynamic convolution,
        # the convolution kernel width and the number of groups in the grouped linear layers.
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
657
0
"""Lazy import structure for the generic encoder-decoder model wrappers."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
718
"""Slow integration test for the VersatileDiffusion image-variation pipeline."""
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


# Keep matmuls in full fp32 so the expected slices below stay reproducible.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
98
0
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs ``input_list[low:mid]`` and
    ``input_list[mid:high + 1]`` back into ``input_list`` in place.

    Returns ``input_list`` for convenience.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Pop the smaller head; `<=` keeps the sort stable.
        result.append((left if left[0] <= right[0] else right).pop(0))
    # Write the merged run back into the list.  The obfuscated original
    # dropped this slice assignment (assigning to a dead local instead), so
    # the input was never actually modified.
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Sort ``input_list`` with a bottom-up (iterative) merge sort.

    The input sequence is copied, so the caller's list is left untouched.
    Returns the sorted copy.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # Merge runs of width p, doubling p each pass (two-way merging).
    p = 2
    while p <= len(input_list):
        # Compute low, mid and high for each run of the current width.
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # Final merge of the last two (possibly uneven) parts.
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
33
"""Tests for the DeformableDETR image processor."""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    """Builds processor kwargs and computes the sizes the processor should produce."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing.

        Mirrors the processor's shortest-edge resize logic; for a batch, the
        expected size is the max height / max width over the batch (padding).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        # Legacy kwargs (max_size / pad_and_return_pixel_mask) must still be honored.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
292
0
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    """Unit tests for ``Dataset.from_list``."""

    def _create_example_records(self):
        """Four uniform records over two columns."""
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        """The same data as ``_create_example_records`` in columnar form."""
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
594
from __future__ import annotations from collections import Counter from random import random class _a : """simple docstring""" def __init__( self ): _lowercase ={} def __lowerCAmelCase ( self , lowerCAmelCase_ ): _lowercase ={} def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): if nodea not in self.connections: self.add_node(lowerCAmelCase_ ) if nodea not in self.connections: self.add_node(lowerCAmelCase_ ) _lowercase =probability def __lowerCAmelCase ( self ): return list(self.connections ) def __lowerCAmelCase ( self , lowerCAmelCase_ ): _lowercase =0 _lowercase =random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __lowerCamelCase ( __a : str , __a : list[tuple[str, str, float]] , __a : int ) -> dict[str, int]: _lowercase =MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(__a , __a , __a ) _lowercase =Counter(graph.get_nodes() ) _lowercase =start for _ in range(__a ): _lowercase =graph.transition(__a ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
594
1