code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case :Dict =logging.get_logger(__name__) class lowerCAmelCase__ ( _lowerCamelCase ): A_ : List[str] = 'timm_backbone' def __init__( self : Optional[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Dict=None , **__UpperCamelCase : Union[str, Any] , ) -> Optional[Any]: super().__init__(**__UpperCamelCase ) A = backbone A = num_channels A = features_only A = use_pretrained_backbone A = True A = out_indices if out_indices is not None else (-1,)
106
import logging import os import threading import time try: import warnings except ImportError: __snake_case :Any =None try: import msvcrt except ImportError: __snake_case :Union[str, Any] =None try: import fcntl except ImportError: __snake_case :str =None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __snake_case :str =OSError # Data # ------------------------------------------------ __snake_case :Any =[ 'Timeout', 'BaseFileLock', 'WindowsFileLock', 'UnixFileLock', 'SoftFileLock', 'FileLock', ] __snake_case :str ='3.0.12' __snake_case :str =None def lowerCamelCase_ ( ) -> List[str]: '''simple docstring''' global _logger A = _logger or logging.getLogger(__name__ ) return _logger class lowerCAmelCase__ ( _lowerCamelCase ): def __init__( self : Tuple , __UpperCamelCase : Union[str, Any] ) -> List[Any]: A = lock_file return None def __str__( self : List[Any] ) -> int: A = f'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class lowerCAmelCase__ : def __init__( self : int , __UpperCamelCase : Union[str, Any] ) -> List[str]: A = lock return None def __enter__( self : Dict ) -> Dict: return self.lock def __exit__( self : int , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any ) -> Optional[int]: self.lock.release() return None class lowerCAmelCase__ : def __init__( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=-1 , __UpperCamelCase : Optional[Any]=None ) -> Dict: A = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long A = self.hash_filename_if_too_long(__UpperCamelCase , __UpperCamelCase ) # The path to the lock file. A = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. A = None # The default timeout value. 
A = timeout # We use this lock primarily for the lock counter. A = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. A = 0 return None @property def __UpperCamelCase ( self : str ) -> Union[str, Any]: return self._lock_file @property def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]: return self._timeout @timeout.setter def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Any ) -> Tuple: A = float(__UpperCamelCase ) return None def __UpperCamelCase ( self : Optional[Any] ) -> Any: raise NotImplementedError() def __UpperCamelCase ( self : int ) -> str: raise NotImplementedError() @property def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]: return self._lock_file_fd is not None def __UpperCamelCase ( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=0.0_5 ) -> Any: # Use the default timeout, if no timeout is provided. if timeout is None: A = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 A = id(self ) A = self._lock_file A = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(__UpperCamelCase ) except: # noqa # Something did go wrong, so decrement the counter. 
with self._thread_lock: A = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Tuple=False ) -> Tuple: with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: A = id(self ) A = self._lock_file logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() A = 0 logger().debug(f'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ) -> Dict: self.acquire() return self def __exit__( self : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ) -> Dict: self.release() return None def __del__( self : Union[str, Any] ) -> Optional[int]: self.release(force=__UpperCamelCase ) return None def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : int ) -> str: A = os.path.basename(__UpperCamelCase ) if len(__UpperCamelCase ) > max_length and max_length > 0: A = os.path.dirname(__UpperCamelCase ) A = str(hash(__UpperCamelCase ) ) A = filename[: max_length - len(__UpperCamelCase ) - 8] + '...' 
+ hashed_filename + '.lock' return os.path.join(__UpperCamelCase , __UpperCamelCase ) else: return path class lowerCAmelCase__ ( _lowerCamelCase ): def __init__( self : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple=-1 , __UpperCamelCase : Optional[Any]=None ) -> Union[str, Any]: from .file_utils import relative_to_absolute_path super().__init__(__UpperCamelCase , timeout=__UpperCamelCase , max_filename_length=__UpperCamelCase ) A = '\\\\?\\' + relative_to_absolute_path(self.lock_file ) def __UpperCamelCase ( self : Any ) -> Any: A = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: A = os.open(self._lock_file , __UpperCamelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCamelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCamelCase ) else: A = fd return None def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]: A = self._lock_file_fd A = None msvcrt.locking(__UpperCamelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCamelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class lowerCAmelCase__ ( _lowerCamelCase ): def __init__( self : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any]=-1 , __UpperCamelCase : Dict=None ) -> Dict: A = os.statvfs(os.path.dirname(__UpperCamelCase ) ).f_namemax super().__init__(__UpperCamelCase , timeout=__UpperCamelCase , max_filename_length=__UpperCamelCase ) def __UpperCamelCase ( self : Any ) -> int: A = os.O_RDWR | os.O_CREAT | os.O_TRUNC A = os.open(self._lock_file , __UpperCamelCase ) try: fcntl.flock(__UpperCamelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCamelCase ) else: A = fd return None def __UpperCamelCase ( self : Optional[int] ) -> int: # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition A = self._lock_file_fd A = None fcntl.flock(__UpperCamelCase , fcntl.LOCK_UN ) os.close(__UpperCamelCase ) return None class lowerCAmelCase__ ( _lowerCamelCase ): def __UpperCamelCase ( self : int ) -> Optional[int]: A = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: A = os.open(self._lock_file , __UpperCamelCase ) except OSError: pass else: A = fd return None def __UpperCamelCase ( self : Optional[Any] ) -> List[str]: os.close(self._lock_file_fd ) A = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __snake_case :List[str] =None if msvcrt: __snake_case :List[Any] =WindowsFileLock elif fcntl: __snake_case :Any =UnixFileLock else: __snake_case :Tuple =SoftFileLock if warnings is not None: warnings.warn('only soft file lock is available')
106
1
'''simple docstring''' import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a ( UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase : Optional[Any] = FunnelTokenizer __lowerCAmelCase : Optional[Any] = FunnelTokenizerFast __lowerCAmelCase : int = True __lowerCAmelCase : List[str] = True def __UpperCamelCase ( self ) -> List[Any]: super().setUp() _a : Optional[Any] = [ '''<unk>''', '''<cls>''', '''<sep>''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __UpperCamelCase ( self , **lowerCamelCase_ ) -> Any: return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def __UpperCamelCase ( self , **lowerCamelCase_ ) -> Optional[int]: return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def __UpperCamelCase ( self , lowerCamelCase_ ) -> str: _a : Optional[Any] = '''UNwant\u00E9d,running''' _a : int = '''unwanted, running''' return input_text, output_text def __UpperCamelCase ( self ) -> Union[str, Any]: _a : List[Any] = self.tokenizer_class(self.vocab_file ) _a : int = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(UpperCamelCase__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 1_0, 8, 9] ) def __UpperCamelCase ( self ) -> List[str]: _a : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: _a : int = 
tokenizer('UNwant\u00E9d,running' ) _a : Optional[Any] = len(inputs['input_ids'] ) - 1 self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len ) _a : List[Any] = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' ) self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
712
'''simple docstring''' from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('socket.socket' ) @patch('builtins.open' ) def UpperCAmelCase_ ( A , A ): '''simple docstring''' _a : List[str] = Mock() _a : str = conn, Mock() _a : Union[str, Any] = iter([1, None] ) _a : List[str] = lambda A : next(A ) # ===== invoke ===== send_file(filename='mytext.txt' , testing=A ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
424
0
"""simple docstring""" from numpy import exp, pi, sqrt def A_ ( __lowercase , __lowercase = 0.0 , __lowercase = 1.0 ): return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
357
'''simple docstring''' def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> tuple[float, float]: # Check if the input is valid if not len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) == 3: raise ValueError('Please enter a valid equation.' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('Both a & b of two equations can\'t be zero.' ) # Extract the coefficients A_ , A_ , A_ = equationa A_ , A_ , A_ = equationa # Calculate the determinants of the matrices A_ = aa * ba - aa * ba A_ = ca * ba - ca * ba A_ = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('Infinite solutions. (Consistent system)' ) else: raise ValueError('No solution. (Inconsistent system)' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: A_ = determinant_x / determinant A_ = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
366
0
"""simple docstring""" from math import pow def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , ): if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count A__ = int(pow(UpperCAmelCase_ , UpperCAmelCase_ ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n A__ , A__ = backtrack( UpperCAmelCase_ , UpperCAmelCase_ , current_number + 1 , UpperCAmelCase_ , UpperCAmelCase_ ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. A__ , A__ = backtrack( UpperCAmelCase_ , UpperCAmelCase_ , current_number + 1 , UpperCAmelCase_ , UpperCAmelCase_ ) return current_sum, solutions_count def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ): if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(UpperCAmelCase_ , UpperCAmelCase_ , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
500
"""simple docstring""" from collections import defaultdict def _snake_case ( UpperCAmelCase_ : int ): A__ = 1 A__ = True for v in tree[start]: if v not in visited: ret += dfs(UpperCAmelCase_ ) if ret % 2 == 0: cuts.append(UpperCAmelCase_ ) return ret def _snake_case ( ): dfs(1 ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Any = 1_0, 9 SCREAMING_SNAKE_CASE_ : Any = defaultdict(list) SCREAMING_SNAKE_CASE_ : dict[int, bool] = {} SCREAMING_SNAKE_CASE_ : list[int] = [] SCREAMING_SNAKE_CASE_ : Optional[int] = 0 SCREAMING_SNAKE_CASE_ : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
500
1
import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() lowercase_ = logging.get_logger('''transformers.models.encodec''') lowercase_ = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } lowercase_ = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', 
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } lowercase_ = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } lowercase_ = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', 
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } lowercase_ = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', 
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } lowercase_ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } lowercase_ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } lowercase_ = [] lowercase_ = [] def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->Optional[int]: """simple docstring""" for attribute in key.split('''.''' ): __magic_name__ : Optional[Any] = getattr(UpperCAmelCase, UpperCAmelCase ) if weight_type is not None: __magic_name__ : Tuple = getattr(UpperCAmelCase, UpperCAmelCase ).shape else: __magic_name__ : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __magic_name__ : Dict = value elif weight_type == "weight_g": __magic_name__ : List[str] = value elif weight_type == "weight_v": __magic_name__ : Tuple = value elif weight_type == "bias": __magic_name__ : Union[str, Any] = value elif weight_type == "running_mean": __magic_name__ : int = value elif weight_type == "running_var": __magic_name__ : int = value elif weight_type == "num_batches_tracked": __magic_name__ : Any = value elif weight_type == "weight_ih_l0": __magic_name__ : Optional[int] = value elif weight_type == "weight_hh_l0": __magic_name__ : str = value elif weight_type == "bias_ih_l0": __magic_name__ : Any = value elif weight_type == "bias_hh_l0": __magic_name__ : Union[str, Any] = value elif weight_type == "weight_ih_l1": __magic_name__ : List[Any] = value elif weight_type == "weight_hh_l1": __magic_name__ : Any = value elif weight_type == "bias_ih_l1": __magic_name__ : Dict = value elif weight_type == "bias_hh_l1": __magic_name__ : Dict = value else: __magic_name__ : Optional[int] = value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." 
in key: __magic_name__ , __magic_name__ : str = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->str: """simple docstring""" __magic_name__ : Optional[int] = [] if model_name == "encodec_24khz" or "encodec_32khz": __magic_name__ : int = MAPPING_24K elif model_name == "encodec_48khz": __magic_name__ : Tuple = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(UpperCAmelCase, UpperCAmelCase ): logger.info(F'''{name} was ignored''' ) continue __magic_name__ : int = False for key, mapped_key in MAPPING.items(): if "*" in key: __magic_name__ , __magic_name__ : str = key.split('''.*.''' ) if prefix in name and suffix in name: __magic_name__ : Optional[Any] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue __magic_name__ : Union[str, Any] = True if "*" in mapped_key: __magic_name__ : Optional[int] = name.split(UpperCAmelCase )[0].split('''.''' )[-2] __magic_name__ : Optional[int] = mapped_key.replace('''*''', UpperCAmelCase ) if "weight_g" in name: __magic_name__ : Tuple = '''weight_g''' elif "weight_v" in name: __magic_name__ : Any = '''weight_v''' elif "weight_ih_l0" in name: __magic_name__ : List[Any] = '''weight_ih_l0''' elif "weight_hh_l0" in name: __magic_name__ : Any = '''weight_hh_l0''' elif "bias_ih_l0" in name: __magic_name__ : Optional[Any] = '''bias_ih_l0''' elif "bias_hh_l0" in name: __magic_name__ : Union[str, Any] = '''bias_hh_l0''' elif "weight_ih_l1" in name: __magic_name__ : int = '''weight_ih_l1''' elif "weight_hh_l1" in name: __magic_name__ : str = '''weight_hh_l1''' elif "bias_ih_l1" in name: __magic_name__ : Dict = '''bias_ih_l1''' elif "bias_hh_l1" in name: __magic_name__ : List[Any] = '''bias_hh_l1''' elif "bias" in name: 
__magic_name__ : Any = '''bias''' elif "weight" in name: __magic_name__ : int = '''weight''' elif "running_mean" in name: __magic_name__ : Tuple = '''running_mean''' elif "running_var" in name: __magic_name__ : int = '''running_var''' elif "num_batches_tracked" in name: __magic_name__ : List[Any] = '''num_batches_tracked''' else: __magic_name__ : str = None set_recursively(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) continue if not is_used: unused_weights.append(UpperCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase=None, UpperCAmelCase=None, ) ->Any: """simple docstring""" if config_path is not None: __magic_name__ : Optional[int] = EncodecConfig.from_pretrained(UpperCAmelCase ) else: __magic_name__ : List[str] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": __magic_name__ : Optional[int] = [8, 5, 4, 4] __magic_name__ : Tuple = [2.2] __magic_name__ : Dict = 64 __magic_name__ : str = 3_2000 __magic_name__ : Tuple = 2048 __magic_name__ : List[Any] = False __magic_name__ : List[Any] = False __magic_name__ : Tuple = False elif model_name == "encodec_48khz": __magic_name__ : Dict = [8, 5, 4, 2] __magic_name__ : Dict = [3.0, 6.0, 12.0, 24.0] __magic_name__ : Optional[int] = 4_8000 __magic_name__ : str = 2 __magic_name__ : str = False __magic_name__ : Optional[Any] = '''time_group_norm''' __magic_name__ : str = True __magic_name__ : Optional[int] = 1.0 __magic_name__ : int = 0.01 else: raise ValueError(F'''Unknown model name: {model_name}''' ) __magic_name__ : Dict = EncodecModel(UpperCAmelCase ) __magic_name__ : Optional[int] = EncodecFeatureExtractor( feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, ) feature_extractor.save_pretrained(UpperCAmelCase ) 
__magic_name__ : Optional[Any] = torch.load(UpperCAmelCase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights __magic_name__ : str = original_checkpoint['''best_state'''] recursively_load_weights(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(UpperCAmelCase ) model.push_to_hub(UpperCAmelCase ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowercase_ = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
154
from timeit import timeit lowercase_ = { '''MALAYALAM''': True, '''String''': False, '''rotor''': True, '''level''': True, '''A''': True, '''BB''': True, '''ABC''': False, '''amanaplanacanalpanama''': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def lowerCAmelCase ( UpperCAmelCase ) ->bool: """simple docstring""" __magic_name__ : List[str] = 0 __magic_name__ : List[Any] = len(UpperCAmelCase ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def lowerCAmelCase ( UpperCAmelCase ) ->bool: """simple docstring""" __magic_name__ : Optional[Any] = len(UpperCAmelCase ) // 2 __magic_name__ : int = len(UpperCAmelCase ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(UpperCAmelCase ) ) def lowerCAmelCase ( UpperCAmelCase ) ->bool: """simple docstring""" if len(UpperCAmelCase ) <= 2: return True if s[0] == s[len(UpperCAmelCase ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def lowerCAmelCase ( UpperCAmelCase ) ->bool: """simple docstring""" return s == s[::-1] def lowerCAmelCase ( UpperCAmelCase ) ->None: """simple docstring""" __magic_name__ : str = F'''all({name}(key) is value for key, value in test_data.items())''' __magic_name__ : Optional[Any] = F'''from __main__ import test_data, {name}''' __magic_name__ : int = 50_0000 __magic_name__ : List[str] = timeit(stmt=UpperCAmelCase, setup=UpperCAmelCase, number=UpperCAmelCase ) print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is 
is_palindrome_slice(key) print(f"{key:21} {value}") print('''a man a plan a canal panama''') # finished 500,000 runs in 0.46793 seconds benchmark_function('''is_palindrome_slice''') # finished 500,000 runs in 0.85234 seconds benchmark_function('''is_palindrome''') # finished 500,000 runs in 1.32028 seconds benchmark_function('''is_palindrome_recursive''') # finished 500,000 runs in 2.08679 seconds benchmark_function('''is_palindrome_traversal''')
154
1
"""simple docstring""" import os from math import logaa def _SCREAMING_SNAKE_CASE ( __snake_case : str = "base_exp.txt" ): '''simple docstring''' lowercase = 0 lowercase = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) ): lowercase , lowercase = list(map(__snake_case , line.split(',' ) ) ) if x * logaa(__snake_case ) > largest: lowercase = x * logaa(__snake_case ) lowercase = i + 1 return result if __name__ == "__main__": print(solution())
134
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class a : def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowercase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) lowercase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowercase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowercase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) 
torch.manual_seed(0 ) lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) lowercase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowercase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) lowercase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCamelCase_ ( self ): lowercase = self.get_dummy_components() lowercase = self.pipeline_class(**_lowerCamelCase ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) lowercase = self.get_dummy_inputs(_lowerCamelCase ) lowercase = inputs['prompt'] lowercase = inputs['generator'] lowercase = inputs['num_inference_steps'] 
lowercase = inputs['output_type'] if "image" in inputs: lowercase = inputs['image'] else: lowercase = None if "mask_image" in inputs: lowercase = inputs['mask_image'] else: lowercase = None if "original_image" in inputs: lowercase = inputs['original_image'] else: lowercase = None lowercase , lowercase = pipe.encode_prompt(_lowerCamelCase ) # inputs with prompt converted to embeddings lowercase = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: lowercase = image if mask_image is not None: lowercase = mask_image if original_image is not None: lowercase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowercase = pipe(**_lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_lowerCamelCase ) lowercase = self.pipeline_class.from_pretrained(_lowerCamelCase ) pipe_loaded.to(_lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_lowerCamelCase , _lowerCamelCase ) is None , F'`{optional_component}` did not stay set to None after loading.' 
, ) lowercase = self.get_dummy_inputs(_lowerCamelCase ) lowercase = inputs['generator'] lowercase = inputs['num_inference_steps'] lowercase = inputs['output_type'] # inputs with prompt converted to embeddings lowercase = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: lowercase = image if mask_image is not None: lowercase = mask_image if original_image is not None: lowercase = original_image lowercase = pipe_loaded(**_lowerCamelCase )[0] lowercase = np.abs(to_np(_lowerCamelCase ) - to_np(_lowerCamelCase ) ).max() self.assertLess(_lowerCamelCase , 1e-4 ) def UpperCamelCase_ ( self ): lowercase = self.get_dummy_components() lowercase = self.pipeline_class(**_lowerCamelCase ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) lowercase = self.get_dummy_inputs(_lowerCamelCase ) lowercase = pipe(**_lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_lowerCamelCase ) lowercase = self.pipeline_class.from_pretrained(_lowerCamelCase ) pipe_loaded.to(_lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowercase = self.get_dummy_inputs(_lowerCamelCase ) lowercase = pipe_loaded(**_lowerCamelCase )[0] lowercase = np.abs(to_np(_lowerCamelCase ) - to_np(_lowerCamelCase ) ).max() self.assertLess(_lowerCamelCase , 1e-4 )
134
1
"""simple docstring""" SCREAMING_SNAKE_CASE__ : Dict =[ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] SCREAMING_SNAKE_CASE__ : List[Any] =[ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] SCREAMING_SNAKE_CASE__ : Optional[int] =[ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] SCREAMING_SNAKE_CASE__ : List[Any] =[ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] SCREAMING_SNAKE_CASE__ : Union[str, Any] =[ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 
457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] SCREAMING_SNAKE_CASE__ : Optional[int] =[ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] SCREAMING_SNAKE_CASE__ : Optional[Any] =[ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] SCREAMING_SNAKE_CASE__ : Optional[Any] =[ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
434
"""simple docstring""" from jiwer import compute_measures import datasets SCREAMING_SNAKE_CASE__ : Dict ='\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' SCREAMING_SNAKE_CASE__ : Dict ='\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. 
The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' SCREAMING_SNAKE_CASE__ : Dict ='\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def a__ ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', ] , ) def a__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Dict: if concatenate_texts: return compute_measures(_lowercase , _lowercase )["wer"] else: _lowerCamelCase : Any = 0 _lowerCamelCase : Tuple = 0 for prediction, reference in zip(_lowercase , _lowercase ): _lowerCamelCase : Any = compute_measures(_lowercase , _lowercase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
434
1
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py __lowerCamelCase : Dict = '''src/transformers''' __lowerCamelCase : Any = '''docs/source/en''' __lowerCamelCase : Tuple = '''.''' def lowercase__ ( __A: Optional[int] ,__A: int ,__A: Optional[Any] ): '''simple docstring''' with open(__A ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: __magic_name__ : Tuple = f.readlines() # Find the start prompt. __magic_name__ : List[Any] = 0 while not lines[start_index].startswith(__A ): start_index += 1 start_index += 1 __magic_name__ : Optional[int] = start_index while not lines[end_index].startswith(__A ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | __lowerCamelCase : Any = '''Model|Encoder|Decoder|ForConditionalGeneration''' # Regexes that match TF/Flax/PT model names. __lowerCamelCase : Optional[int] = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') __lowerCamelCase : List[Any] = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __lowerCamelCase : Union[str, Any] = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # This is to make sure the transformers module imported is the one in the repo. 
__lowerCamelCase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowercase__ ( __A: str ): '''simple docstring''' __magic_name__ : Optional[Any] = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' ,__A ) return [m.group(0 ) for m in matches] def lowercase__ ( __A: Optional[int] ,__A: Any ): '''simple docstring''' __magic_name__ : List[str] = 2 if text == '''✅''' or text == '''❌''' else len(__A ) __magic_name__ : Tuple = (width - text_length) // 2 __magic_name__ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowercase__ ( ): '''simple docstring''' __magic_name__ : Dict = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __magic_name__ : int = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } __magic_name__ : Tuple = {name: config.replace('''Config''' ,'''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. __magic_name__ : int = collections.defaultdict(__A ) __magic_name__ : Union[str, Any] = collections.defaultdict(__A ) __magic_name__ : Union[str, Any] = collections.defaultdict(__A ) __magic_name__ : Optional[int] = collections.defaultdict(__A ) __magic_name__ : Dict = collections.defaultdict(__A ) # Let's lookup through all transformers object (once). 
for attr_name in dir(__A ): __magic_name__ : Optional[int] = None if attr_name.endswith('''Tokenizer''' ): __magic_name__ : List[str] = slow_tokenizers __magic_name__ : Any = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): __magic_name__ : Optional[Any] = fast_tokenizers __magic_name__ : Dict = attr_name[:-1_3] elif _re_tf_models.match(__A ) is not None: __magic_name__ : str = tf_models __magic_name__ : Optional[Any] = _re_tf_models.match(__A ).groups()[0] elif _re_flax_models.match(__A ) is not None: __magic_name__ : Union[str, Any] = flax_models __magic_name__ : Any = _re_flax_models.match(__A ).groups()[0] elif _re_pt_models.match(__A ) is not None: __magic_name__ : Union[str, Any] = pt_models __magic_name__ : Any = _re_pt_models.match(__A ).groups()[0] if lookup_dict is not None: while len(__A ) > 0: if attr_name in model_name_to_prefix.values(): __magic_name__ : Optional[Any] = True break # Try again after removing the last word in the name __magic_name__ : List[Any] = ''''''.join(camel_case_split(__A )[:-1] ) # Let's build that table! __magic_name__ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) __magic_name__ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). 
__magic_name__ : Union[str, Any] = [len(__A ) + 2 for c in columns] __magic_name__ : str = max([len(__A ) for name in model_names] ) + 2 # Build the table per se __magic_name__ : Tuple = '''|''' + '''|'''.join([_center_text(__A ,__A ) for c, w in zip(__A ,__A )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" __magic_name__ : Dict = {True: '''✅''', False: '''❌'''} for name in model_names: __magic_name__ : Dict = model_name_to_prefix[name] __magic_name__ : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__A ,__A ) for l, w in zip(__A ,__A )] ) + "|\n" return table def lowercase__ ( __A: Any=False ): '''simple docstring''' __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : str = _find_text_in_file( filename=os.path.join(__A ,'''index.md''' ) ,start_prompt='''<!--This table is updated automatically from the auto modules''' ,end_prompt='''<!-- End table-->''' ,) __magic_name__ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__A ,'''index.md''' ) ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __lowerCamelCase : Optional[int] = parser.parse_args() check_model_table(args.fix_and_overwrite)
501
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient __lowerCamelCase : Dict = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN''']) def lowercase__ ( __A: List[Any] ): '''simple docstring''' __magic_name__ : int = test_results.split(''' ''' ) __magic_name__ : Optional[Any] = 0 __magic_name__ : int = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. __magic_name__ : Optional[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(__A ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def lowercase__ ( __A: Dict ): '''simple docstring''' __magic_name__ : Tuple = {} __magic_name__ : List[Any] = None __magic_name__ : int = False for line in failures_short_lines.split('''\n''' ): if re.search(r'''_ \[doctest\]''' ,__A ): __magic_name__ : Dict = True __magic_name__ : Any = line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): __magic_name__ : List[Any] = line __magic_name__ : List[Any] = False return failures class lowerCamelCase : '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Dict ) -> List[str]: __magic_name__ : Union[str, Any] = title __magic_name__ : List[Any] = doc_test_results['''time_spent'''].split(''',''' )[0] __magic_name__ : Optional[Any] = doc_test_results['''success'''] __magic_name__ : Optional[Any] = doc_test_results['''failures'''] __magic_name__ : Union[str, Any] = self.n_success + self.n_failures # Failures and success of the modeling tests __magic_name__ : List[Any] = doc_test_results @property def UpperCAmelCase__ ( self : List[str] ) -> str: __magic_name__ : Union[str, Any] = [self._time_spent] 
__magic_name__ : str = 0 for time in time_spent: __magic_name__ : List[Any] = time.split(''':''' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowerCamelCase_ ) == 1: __magic_name__ : str = [0, 0, time_parts[0]] __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds __magic_name__ , __magic_name__ , __magic_name__ : Dict = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F'''{int(lowerCamelCase_ )}h{int(lowerCamelCase_ )}m{int(lowerCamelCase_ )}s''' @property def UpperCAmelCase__ ( self : Dict ) -> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } @property def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in''' F''' {self.time}.''' ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } @property def UpperCAmelCase__ ( self : Dict ) -> Dict: __magic_name__ : List[Any] = 40 __magic_name__ : Optional[Any] = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(lowerCamelCase_ , lowerCamelCase_ )} __magic_name__ : 
Any = '''''' for category, failures in category_failures.items(): if len(lowerCamelCase_ ) == 0: continue if report != "": report += "\n\n" report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(lowerCamelCase_ ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F'''The following examples had failures:\n\n\n{report}\n''', }, } @property def UpperCAmelCase__ ( self : Dict ) -> str: __magic_name__ : List[str] = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(lowerCamelCase_ ) @staticmethod def UpperCAmelCase__ ( ) -> List[Any]: __magic_name__ : Dict = [ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } ] print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(lowerCamelCase_ )} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=lowerCamelCase_ , ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(self.payload )} ) ) __magic_name__ : Tuple = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.''' __magic_name__ : List[Any] = client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=lowerCamelCase_ , ) def UpperCAmelCase__ ( self : Tuple , 
lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Any ) -> Optional[Any]: __magic_name__ : Optional[Any] = '''''' for key, value in failures.items(): __magic_name__ : int = value[:200] + ''' [Truncated]''' if len(lowerCamelCase_ ) > 250 else value failures_text += F'''*{key}*\n_{value}_\n\n''' __magic_name__ : Dict = job_name __magic_name__ : Tuple = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: __magic_name__ : int = { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True}, '''url''': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def UpperCAmelCase__ ( self : Any ) -> Tuple: if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''' ) __magic_name__ : List[str] = self.doc_test_results.pop('''job_link''' ) self.doc_test_results.pop('''failures''' ) self.doc_test_results.pop('''success''' ) self.doc_test_results.pop('''time_spent''' ) __magic_name__ : int = sorted(self.doc_test_results.items() , key=lambda lowerCamelCase_ : t[0] ) for job, job_result in sorted_dict: if len(job_result['''failures'''] ): __magic_name__ : List[Any] = F'''*Num failures* :{len(job_result['failed'] )} \n''' __magic_name__ : str = job_result['''failures'''] __magic_name__ : str = self.get_reply_blocks(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , text=lowerCamelCase_ ) print('''Sending the following reply''' ) print(json.dumps({'''blocks''': blocks} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=lowerCamelCase_ , thread_ts=self.thread_ts['''ts'''] , ) time.sleep(1 ) def lowercase__ ( ): '''simple docstring''' __magic_name__ : List[Any] = 
os.environ['''GITHUB_RUN_ID'''] __magic_name__ : Optional[int] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100''' __magic_name__ : Any = requests.get(__A ).json() __magic_name__ : List[str] = {} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) __magic_name__ : Optional[int] = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 ) for i in range(__A ): __magic_name__ : Optional[Any] = requests.get(url + F'''&page={i + 2}''' ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''' ,__A ) return {} def lowercase__ ( __A: str ): '''simple docstring''' __magic_name__ : List[str] = {} if os.path.exists(__A ): __magic_name__ : Any = os.listdir(__A ) for file in files: try: with open(os.path.join(__A ,__A ) ,encoding='''utf-8''' ) as f: __magic_name__ : Dict = f.read() except UnicodeDecodeError as e: raise ValueError(F'''Could not open {os.path.join(__A ,__A )}.''' ) from e return _artifact def lowercase__ ( ): '''simple docstring''' class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , lowerCamelCase_ : str ) -> int: __magic_name__ : int = name __magic_name__ : Dict = [] def __str__( self : List[str] ) -> Union[str, Any]: return self.name def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase_ : str ) -> Optional[Any]: self.paths.append({'''name''': self.name, '''path''': path} ) __magic_name__ : Dict[str, Artifact] = {} __magic_name__ : Tuple = filter(os.path.isdir ,os.listdir() ) for directory in directories: __magic_name__ : int = directory if artifact_name not in _available_artifacts: __magic_name__ : List[Any] = Artifact(__A ) _available_artifacts[artifact_name].add_path(__A ) return _available_artifacts if __name__ == "__main__": __lowerCamelCase : Optional[int] = get_job_links() __lowerCamelCase : Tuple = 
retrieve_available_artifacts() __lowerCamelCase : Tuple = collections.OrderedDict( [ ('''*.py''', '''API Examples'''), ('''*.md''', '''MD Examples'''), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' __lowerCamelCase : Tuple = { v: { '''failed''': [], '''failures''': {}, } for v in docs.values() } # Link to the GitHub Action job __lowerCamelCase : Optional[int] = github_actions_job_links.get('''run_doctests''') __lowerCamelCase : Optional[Any] = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0] __lowerCamelCase : Optional[int] = retrieve_artifact(artifact_path['''name''']) if "stats" in artifact: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = handle_test_results(artifact['''stats''']) __lowerCamelCase : Optional[Any] = failed __lowerCamelCase : List[Any] = success __lowerCamelCase : int = time_spent[1:-1] + ''', ''' __lowerCamelCase : Dict = extract_first_line_failure(artifact['''failures_short''']) for line in artifact["summary_short"].split('''\n'''): if re.search('''FAILED''', line): __lowerCamelCase : List[Any] = line.replace('''FAILED ''', '''''') __lowerCamelCase : int = line.split()[0].replace('''\n''', '''''') if "::" in line: __lowerCamelCase , __lowerCamelCase : List[str] = line.split('''::''') else: __lowerCamelCase , __lowerCamelCase : Optional[int] = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): __lowerCamelCase : Union[str, Any] = docs[file_regex] doc_test_results[category]["failed"].append(test) __lowerCamelCase : List[Any] = all_failures[test] if test in all_failures else '''N/A''' __lowerCamelCase : int = failure break __lowerCamelCase : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results) message.post() message.post_reply()
501
1
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , *__UpperCAmelCase : Any , **__UpperCAmelCase : str ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , "decord" ) self.check_model_type(__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None ): '''simple docstring''' _A = {} if frame_sampling_rate is not None: _A = frame_sampling_rate if num_frames is not None: _A = num_frames _A = {} if top_k is not None: _A = top_k return preprocess_params, {}, postprocess_params def __call__( self : Optional[int] , __UpperCAmelCase : Union[str, List[str]] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : str=None , __UpperCAmelCase : Dict=1 ): '''simple docstring''' if num_frames is None: _A = self.model.config.num_frames if video.startswith("http://" ) or video.startswith("https://" ): _A = BytesIO(requests.get(__UpperCAmelCase ).content ) _A = VideoReader(__UpperCAmelCase ) videoreader.seek(0 ) _A = 0 _A = num_frames * frame_sampling_rate - 1 _A = np.linspace(__UpperCAmelCase , __UpperCAmelCase , num=__UpperCAmelCase , dtype=np.intaa ) _A = videoreader.get_batch(__UpperCAmelCase ).asnumpy() _A = 
list(__UpperCAmelCase ) _A = self.image_processor(__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = self.model(**__UpperCAmelCase ) return model_outputs def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int]=5 ): '''simple docstring''' if top_k > self.model.config.num_labels: _A = self.model.config.num_labels if self.framework == "pt": _A = model_outputs.logits.softmax(-1 )[0] _A , _A = probs.topk(__UpperCAmelCase ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) _A = scores.tolist() _A = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCAmelCase , __UpperCAmelCase )]
330
'''simple docstring''' import random from typing import Any def __lowercase ( __lowercase ) -> list[Any]: '''simple docstring''' for _ in range(len(__lowercase ) ): _A = random.randint(0 , len(__lowercase ) - 1 ) _A = random.randint(0 , len(__lowercase ) - 1 ) _A , _A = data[b], data[a] return data if __name__ == "__main__": lowerCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7] lowerCamelCase_ = ['''python''', '''says''', '''hello''', '''!'''] print('''Fisher-Yates Shuffle:''') print('''List''', integers, strings) print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
330
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( 
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
716
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = DanceDiffusionPipeline __lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS __lowerCamelCase = PipelineTesterMixin.required_optional_params - { 'callback', 'latents', 'callback_steps', 'output_type', 'num_images_per_prompt', } __lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) A__ = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase , use_timestep_embedding=lowercase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , ) A__ = IPNDMScheduler() A__ = { "unet": unet, "scheduler": scheduler, } return components def UpperCamelCase ( self , lowercase , lowercase=0 ) -> Union[str, Any]: '''simple docstring''' if str(lowercase ).startswith("mps" ): A__ = torch.manual_seed(lowercase ) else: A__ = torch.Generator(device=lowercase ).manual_seed(lowercase ) A__ = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = 
self.get_dummy_components() A__ = DanceDiffusionPipeline(**lowercase ) A__ = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A__ = self.get_dummy_inputs(lowercase ) A__ = pipe(**lowercase ) A__ = output.audios A__ = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) A__ = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCamelCase ( self ) -> Dict: '''simple docstring''' return super().test_save_load_local() @skip_mps def UpperCamelCase ( self ) -> int: '''simple docstring''' return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def UpperCamelCase ( self ) -> int: '''simple docstring''' return super().test_attention_slicing_forward_pass() def UpperCamelCase ( self ) -> str: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = torch_device A__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" ) A__ = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A__ = torch.manual_seed(0 ) A__ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 ) A__ = output.audios A__ = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) A__ = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = torch_device A__ = 
DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa ) A__ = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A__ = torch.manual_seed(0 ) A__ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 ) A__ = output.audios A__ = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) A__ = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
626
0
def _snake_case (_snake_case : int) -> int: assert isinstance(_snake_case , _snake_case), f'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: _lowercase =f'''The input value of [n={number}] has to be > 0''' raise ValueError(_snake_case) else: _lowercase =sylvester(number - 1) _lowercase =num - 1 _lowercase =num return lower * upper + 1 if __name__ == "__main__": print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
181
from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class SCREAMING_SNAKE_CASE_ ( _a ): """simple docstring""" __lowerCAmelCase : Union[List[PIL.Image.Image], np.ndarray] __lowerCAmelCase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
181
1
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class snake_case__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Union[str, Any], _snake_case : Any, _snake_case : List[str]=1_3, _snake_case : Dict=7, _snake_case : Any=True, _snake_case : Union[str, Any]=True, _snake_case : Union[str, Any]=True, _snake_case : Union[str, Any]=True, _snake_case : Optional[Any]=9_9, _snake_case : Optional[int]=3_2, _snake_case : int=5, _snake_case : List[str]=4, _snake_case : List[str]=3_7, _snake_case : Optional[Any]="gelu", _snake_case : Dict=0.1, _snake_case : List[Any]=0.1, _snake_case : Optional[Any]=5_1_2, _snake_case : Optional[Any]=1_6, _snake_case : List[str]=2, _snake_case : str=0.0_2, _snake_case : int=4, ) ->Dict: snake_case__ : Optional[int] = parent snake_case__ : str = batch_size snake_case__ : Dict = seq_length snake_case__ : List[str] = is_training snake_case__ : List[str] = use_attention_mask snake_case__ : Union[str, Any] = use_token_type_ids snake_case__ : List[Any] = use_labels snake_case__ : List[Any] = vocab_size snake_case__ : List[str] = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : List[str] = num_attention_heads snake_case__ : Optional[Any] = intermediate_size snake_case__ : Any = hidden_act snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : List[str] = 
attention_probs_dropout_prob snake_case__ : List[str] = max_position_embeddings snake_case__ : List[Any] = type_vocab_size snake_case__ : int = type_sequence_label_size snake_case__ : str = initializer_range snake_case__ : Union[str, Any] = num_choices def lowercase_ ( self : List[str] ) ->Tuple: snake_case__ : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) snake_case__ : Tuple = None if self.use_attention_mask: snake_case__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Tuple = None if self.use_token_type_ids: snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) snake_case__ : int = RobertaPreLayerNormConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCAmelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def lowercase_ ( self : int ) ->Tuple: snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[Any] = config_and_inputs snake_case__ : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self : Any ) ->int: snake_case__ : Dict = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = config_and_inputs snake_case__ : str = True snake_case__ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( 
config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class snake_case__ ( _lowerCAmelCase , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowercase_ ( self : Any ) ->Dict: snake_case__ : Any = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowercase_ ( self : Union[str, Any] ) ->str: for model_class_name in self.all_model_classes: snake_case__ : Any = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=_lowerCAmelCase ) snake_case__ : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCAmelCase ) @require_flax class snake_case__ ( unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self : str ) ->str: snake_case__ : List[str] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=_lowerCAmelCase ) snake_case__ : List[Any] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]], dtype=jnp.intaa ) snake_case__ : Optional[Any] = model(_lowerCAmelCase )[0] snake_case__ : Any = [1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ), _lowerCAmelCase ) # compare the actual values for a slice. 
snake_case__ : Optional[int] = np.array( [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]], dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3], _lowerCAmelCase, atol=1e-4 ) ) @slow def lowercase_ ( self : List[Any] ) ->Dict: snake_case__ : Union[str, Any] = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=_lowerCAmelCase ) snake_case__ : Union[str, Any] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]], dtype=jnp.intaa ) snake_case__ : Dict = model(_lowerCAmelCase )[0] # compare the actual values for a slice. snake_case__ : Any = np.array( [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]], dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3], _lowerCAmelCase, atol=1e-4 ) )
709
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


# NOTE(review): the obfuscated original bound every module constant to `a_` (each assignment
# overwrote the previous one) while the class below read VOCAB_FILES_NAMES etc., and all four
# methods were named `lowercase_`, making the internal `self._special_token_mask` call
# unresolvable. Names restored from the call sites and the upstream Pegasus fast tokenizer.
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) PEGASUS tokenizer.

    Reserves `offset` special ids: `<mask_1>` (sentence mask), `<mask_2>` (word mask) and
    `<unk_2>` ... `<unk_{offset-1}>` fillers, mirroring the slow ``PegasusTokenizer``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # number of reserved special-token slots
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over `seq` marking special tokens (``<unk>`` excluded)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append EOS; PEGASUS adds no BOS and no sep between pair segments."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` so a slow tokenizer can be rebuilt."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
243
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): @slow def _a ( self ) -> Any: _UpperCAmelCase = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" ) _UpperCAmelCase = { "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute" "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } _UpperCAmelCase = model(a_ )["last_hidden_state"] _UpperCAmelCase = tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape , a_ ) # compare the actual values for a slice. _UpperCAmelCase = tf.convert_to_tensor( [ [ [0.0681762, 0.10894451, 0.06772504], [-0.06423668, 0.02366615, 0.04329344], [-0.06057295, 0.09974135, -0.00070584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
657
"""simple docstring""" import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCAmelCase : def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = text_seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size 
_UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _UpperCAmelCase = text_seq_length _UpperCAmelCase = (image_size // patch_size) ** 2 + 1 _UpperCAmelCase = self.text_seq_length + self.image_seq_length def _a ( self ) -> Dict: _UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _UpperCAmelCase = bbox[i, j, 3] _UpperCAmelCase = bbox[i, j, 1] _UpperCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: _UpperCAmelCase = bbox[i, j, 2] _UpperCAmelCase = bbox[i, j, 0] _UpperCAmelCase = t _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _UpperCAmelCase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple: _UpperCAmelCase = LayoutLMvaModel(config=a_ ) model.to(a_ ) model.eval() # text + image _UpperCAmelCase = model(a_ , pixel_values=a_ ) _UpperCAmelCase = model( a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ ) _UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ ) _UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _UpperCAmelCase = model(a_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _UpperCAmelCase = model(pixel_values=a_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ ) model.to(a_ ) model.eval() _UpperCAmelCase = model( a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ ) model.to(a_ ) model.eval() _UpperCAmelCase = model( a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , 
labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict: _UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() _UpperCAmelCase = model( a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self ) -> Optional[int]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase_ : Any = False lowercase_ : Dict = False lowercase_ : List[str] = False lowercase_ : str = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowercase_ : int = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]: # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def _a ( self ) -> Union[str, Any]: _UpperCAmelCase = LayoutLMvaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 ) def _a ( self , a_ , a_ , a_=False ) -> List[str]: _UpperCAmelCase = copy.deepcopy(a_ ) if model_class in get_values(a_ ): _UpperCAmelCase = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(a_ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(a_ ): _UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ ) elif model_class in get_values(a_ ): _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) elif model_class in [ *get_values(a_ ), ]: _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) elif model_class in [ *get_values(a_ ), ]: _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , ) return inputs_dict def _a ( self ) -> int: self.config_tester.run_common_tests() def _a ( self ) -> List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def _a ( self ) -> List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*a_ ) def _a ( self ) -> int: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a_ ) def _a ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*a_ ) def _a ( self ) -> List[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def _a ( self ) -> List[str]: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def __lowerCamelCase ( ): """simple docstring""" _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class _lowerCAmelCase ( unittest.TestCase ): @cached_property def _a ( self ) -> List[Any]: return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None @slow def _a ( self ) -> Union[str, Any]: _UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ ) _UpperCAmelCase = torch.tensor([[1, 2]] ) _UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass _UpperCAmelCase = model( input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , ) # verify the logits _UpperCAmelCase = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , a_ ) _UpperCAmelCase = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
657
1
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


# NOTE(review): the obfuscated original named all four functions `_lowerCamelCase` (so the
# internal calls to get_upernet_config/create_rename_keys/rename_key and the main-guard call to
# convert_upernet_checkpoint were unresolvable) and dropped several assignment targets
# (e.g. `dct[new] = val`). Names restored from the call sites.
def get_upernet_config(model_name):
    """Build an UperNetConfig (ConvNext backbone + ADE20k labels) for the given checkpoint name."""
    auxiliary_in_channels = 384
    # Sequential `if`s on substrings: "xlarge" also matches "large", so its branch must come last.
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    """Map mmsegmentation parameter names to their HF UperNetForSemanticSegmentation names."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Pop `old` from the state dict and re-insert its value under `new` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmseg UperNet+ConvNext checkpoint, convert it to HF format and verify the logits."""
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
701
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) A__ = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): lowerCamelCase :Tuple = None lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) lowerCamelCase :Optional[int] = os.path.abspath('''examples''' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ): lowerCamelCase :Union[str, Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) lowerCamelCase :int = '''\n'''.join(__snake_case ) if special_strings is not None: for string in special_strings: lowerCamelCase :int = diff.replace(__snake_case , '''''' ) self.assertEqual(__snake_case , '''''' ) def snake_case ( self : Dict ): self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) 
self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) lowerCamelCase :Optional[int] = [ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = False @classmethod def snake_case ( cls : Optional[Any] ): super().setUpClass() lowerCamelCase :Any = tempfile.mkdtemp() lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case ( cls : Dict ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case ( self : int ): lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case ( self : List[Any] ): lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() lowerCamelCase :List[Any] = run_command(self._launch_args + testargs ) 
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case ( self : List[str] ): lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) def snake_case ( self : str ): lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): lowerCamelCase :Union[str, Any] = torch.cuda.device_count() else: lowerCamelCase :Dict = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) else: self.assertIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) @slow def snake_case ( self : Any ): lowerCamelCase :Tuple = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case ) lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1] lowerCamelCase :List[str] = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case ( self : int ): lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Any ): with tempfile.TemporaryDirectory() as tmpdir: lowerCamelCase :Tuple = F"\n 
examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) ) def snake_case ( self : Tuple ): lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case ( self : Optional[Any] ): lowerCamelCase :int = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
49
0
class lowerCAmelCase : '''simple docstring''' def __init__( self : Tuple , __snake_case : List[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase = arr.split(',' ) def lowerCamelCase__ ( self : int ) -> Dict: '''simple docstring''' lowerCamelCase = [int(self.array[0] )] * len(self.array ) lowerCamelCase = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): lowerCamelCase = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) lowerCamelCase = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": _lowerCAmelCase : Dict = input('please input some numbers:') _lowerCAmelCase : Any = SubArray(whole_array) _lowerCAmelCase : Optional[Any] = array.solve_sub_array() print(('the results is:', re))
246
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : Dict = logging.get_logger(__name__) def a_ ( UpperCamelCase_ : List[Any] ) -> str: """simple docstring""" lowerCamelCase = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: lowerCamelCase = 1_2_8 elif "12-12" in model_name: lowerCamelCase = 1_2 lowerCamelCase = 1_2 elif "14-14" in model_name: lowerCamelCase = 1_4 lowerCamelCase = 1_4 elif "16-16" in model_name: lowerCamelCase = 1_6 lowerCamelCase = 1_6 else: raise ValueError('Model not supported' ) lowerCamelCase = 'huggingface/label-files' if "speech-commands" in model_name: lowerCamelCase = 3_5 lowerCamelCase = 'speech-commands-v2-id2label.json' else: lowerCamelCase = 5_2_7 lowerCamelCase = 'audioset-id2label.json' lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='dataset' ) , 'r' ) ) lowerCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} lowerCamelCase = idalabel lowerCamelCase = {v: k for k, v in idalabel.items()} return config def a_ ( UpperCamelCase_ : Optional[Any] ) -> Tuple: """simple docstring""" if "module.v" in name: lowerCamelCase = name.replace('module.v' , 'audio_spectrogram_transformer' ) if "cls_token" in name: lowerCamelCase = name.replace('cls_token' , 'embeddings.cls_token' ) if "dist_token" in name: lowerCamelCase = name.replace('dist_token' , 'embeddings.distillation_token' ) if "pos_embed" in name: lowerCamelCase = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) # transformer blocks if "blocks" in name: lowerCamelCase = 
name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: lowerCamelCase = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowerCamelCase = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowerCamelCase = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowerCamelCase = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowerCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowerCamelCase = name.replace('mlp.fc2' , 'output.dense' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: lowerCamelCase = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' ) # classifier head if "module.mlp_head.0" in name: lowerCamelCase = name.replace('module.mlp_head.0' , 'classifier.layernorm' ) if "module.mlp_head.1" in name: lowerCamelCase = name.replace('module.mlp_head.1' , 'classifier.dense' ) return name def a_ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Tuple: """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase = orig_state_dict.pop(UpperCamelCase_ ) if "qkv" in key: lowerCamelCase = key.split('.' 
) lowerCamelCase = int(key_split[3] ) lowerCamelCase = config.hidden_size if "weight" in key: lowerCamelCase = val[:dim, :] lowerCamelCase = val[dim : dim * 2, :] lowerCamelCase = val[-dim:, :] else: lowerCamelCase = val[:dim] lowerCamelCase = val[dim : dim * 2] lowerCamelCase = val[-dim:] else: lowerCamelCase = val return orig_state_dict def a_ ( UpperCamelCase_ : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase = [ 'module.v.head.weight', 'module.v.head.bias', 'module.v.head_dist.weight', 'module.v.head_dist.bias', ] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) @torch.no_grad() def a_ ( UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : int=False ) -> Optional[Any]: """simple docstring""" lowerCamelCase = get_audio_spectrogram_transformer_config(UpperCamelCase_ ) lowerCamelCase = { 'ast-finetuned-audioset-10-10-0.4593': ( 'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.450': ( 'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448': ( 'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448-v2': ( 'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1' ), 'ast-finetuned-audioset-12-12-0.447': ( 'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1' ), 'ast-finetuned-audioset-14-14-0.443': ( 'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1' ), 'ast-finetuned-audioset-16-16-0.442': ( 'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1' ), 'ast-finetuned-speech-commands-v2': ( 'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1' ), } # load original state_dict lowerCamelCase = model_name_to_url[model_name] lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location='cpu' ) # remove some keys 
remove_keys(UpperCamelCase_ ) # rename some keys lowerCamelCase = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ ) # load 🤗 model lowerCamelCase = ASTForAudioClassification(UpperCamelCase_ ) model.eval() model.load_state_dict(UpperCamelCase_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 lowerCamelCase = -4.2_677_393 if 'speech-commands' not in model_name else -6.845_978 lowerCamelCase = 4.5_689_974 if 'speech-commands' not in model_name else 5.5_654_526 lowerCamelCase = 1_0_2_4 if 'speech-commands' not in model_name else 1_2_8 lowerCamelCase = ASTFeatureExtractor(mean=UpperCamelCase_ , std=UpperCamelCase_ , max_length=UpperCamelCase_ ) if "speech-commands" in model_name: lowerCamelCase = load_dataset('speech_commands' , 'v0.02' , split='validation' ) lowerCamelCase = dataset[0]['audio']['array'] else: lowerCamelCase = hf_hub_download( repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , ) lowerCamelCase , lowerCamelCase = torchaudio.load(UpperCamelCase_ ) lowerCamelCase = waveform.squeeze().numpy() lowerCamelCase = feature_extractor(UpperCamelCase_ , sampling_rate=1_6_0_0_0 , return_tensors='pt' ) # forward pass lowerCamelCase = model(**UpperCamelCase_ ) lowerCamelCase = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": lowerCamelCase = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": lowerCamelCase = torch.tensor([-1.1_986, -7.0_903, -8.2_718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": lowerCamelCase = torch.tensor([-2.6_128, -8.0_080, -9.4_344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": lowerCamelCase = torch.tensor([-1.5_080, -7.4_534, -8.8_917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": lowerCamelCase = torch.tensor([-0.5_050, -6.5_833, -8.0_843] ) elif model_name == 
"ast-finetuned-audioset-14-14-0.443": lowerCamelCase = torch.tensor([-0.3_826, -7.0_336, -8.2_413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": lowerCamelCase = torch.tensor([-1.2_113, -6.9_101, -8.3_470] ) elif model_name == "ast-finetuned-speech-commands-v2": lowerCamelCase = torch.tensor([6.1_589, -8.0_566, -8.7_984] ) else: raise ValueError('Unknown model name' ) if not torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1E-4 ): raise ValueError('Logits don\'t match' ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase_ ) print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(UpperCamelCase_ ) if push_to_hub: print('Pushing model and feature extractor to the hub...' ) model.push_to_hub(f'''MIT/{model_name}''' ) feature_extractor.push_to_hub(f'''MIT/{model_name}''' ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowerCAmelCase : Tuple = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
246
1
from collections import defaultdict def lowerCamelCase__ ( __lowerCAmelCase : int ): """simple docstring""" lowerCAmelCase_ = 1 lowerCAmelCase_ = True for v in tree[start]: if v not in visited: ret += dfs(__lowerCAmelCase ) if ret % 2 == 0: cuts.append(__lowerCAmelCase ) return ret def lowerCamelCase__ ( ): """simple docstring""" dfs(1 ) if __name__ == "__main__": _A, _A = 10, 9 _A = defaultdict(list) _A = {} _A = [] _A = 0 _A = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
279
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset _A = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class _lowerCAmelCase ( nn.Module ): def __init__( self , _UpperCamelCase ) -> Optional[Any]: super().__init__() lowerCAmelCase_ = torchvision.models.resnetaaa(pretrained=_UpperCamelCase ) lowerCAmelCase_ = list(model.children() )[:-2] lowerCAmelCase_ = nn.Sequential(*_UpperCamelCase ) lowerCAmelCase_ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def __a ( self , _UpperCamelCase ) -> Dict: # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 lowerCAmelCase_ = self.pool(self.model(_UpperCamelCase ) ) lowerCAmelCase_ = torch.flatten(_UpperCamelCase , start_dim=2 ) lowerCAmelCase_ = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class _lowerCAmelCase ( __a ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]: lowerCAmelCase_ = [json.loads(_UpperCamelCase ) for l in open(_UpperCamelCase )] lowerCAmelCase_ = os.path.dirname(_UpperCamelCase ) lowerCAmelCase_ = tokenizer lowerCAmelCase_ = labels lowerCAmelCase_ = len(_UpperCamelCase ) lowerCAmelCase_ = max_seq_length lowerCAmelCase_ = transforms def __len__( self ) -> Any: return len(self.data ) def __getitem__( self , _UpperCamelCase ) -> Any: lowerCAmelCase_ = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=_UpperCamelCase ) ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = sentence[0], sentence[1:-1], sentence[-1] lowerCAmelCase_ = sentence[: self.max_seq_length] lowerCAmelCase_ = torch.zeros(self.n_classes ) lowerCAmelCase_ = 1 lowerCAmelCase_ = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" ) lowerCAmelCase_ = 
self.transforms(_UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def __a ( self ) -> str: lowerCAmelCase_ = Counter() for row in self.data: label_freqs.update(row["label"] ) return label_freqs def lowerCamelCase__ ( __lowerCAmelCase : List[str] ): """simple docstring""" lowerCAmelCase_ = [len(row["sentence"] ) for row in batch] lowerCAmelCase_ , lowerCAmelCase_ = len(__lowerCAmelCase ), max(__lowerCAmelCase ) lowerCAmelCase_ = torch.zeros(__lowerCAmelCase , __lowerCAmelCase , dtype=torch.long ) lowerCAmelCase_ = torch.zeros(__lowerCAmelCase , __lowerCAmelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(__lowerCAmelCase , __lowerCAmelCase ) ): lowerCAmelCase_ = input_row["sentence"] lowerCAmelCase_ = 1 lowerCAmelCase_ = torch.stack([row["image"] for row in batch] ) lowerCAmelCase_ = torch.stack([row["label"] for row in batch] ) lowerCAmelCase_ = torch.stack([row["image_start_token"] for row in batch] ) lowerCAmelCase_ = torch.stack([row["image_end_token"] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase__ ( ): """simple docstring""" return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase__ ( ): """simple docstring""" return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
279
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'andreasmadsen/efficient_mlm_m0.40': ( 'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json' ), } class _SCREAMING_SNAKE_CASE ( A__ ): lowerCamelCase_ = '''roberta-prelayernorm''' def __init__( self : List[str] , snake_case_ : int=5_0265 , snake_case_ : Any=768 , snake_case_ : Dict=12 , snake_case_ : Optional[int]=12 , snake_case_ : Union[str, Any]=3072 , snake_case_ : Dict="gelu" , snake_case_ : List[str]=0.1 , snake_case_ : str=0.1 , snake_case_ : List[str]=512 , snake_case_ : Optional[int]=2 , snake_case_ : List[Any]=0.02 , snake_case_ : str=1E-12 , snake_case_ : Dict=1 , snake_case_ : Tuple=0 , snake_case_ : List[Any]=2 , snake_case_ : int="absolute" , snake_case_ : int=True , snake_case_ : List[str]=None , **snake_case_ : Tuple , ): """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) A : int = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : str = num_attention_heads A : Tuple = hidden_act A : List[Any] = intermediate_size A : Optional[int] = hidden_dropout_prob A : List[str] = attention_probs_dropout_prob A : List[str] = max_position_embeddings A : Dict = type_vocab_size A : List[Any] = initializer_range A : Dict = layer_norm_eps A : Dict = position_embedding_type A : Optional[Any] = use_cache A : Union[str, Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( A__ ): @property def _UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" if self.task == "multiple-choice": A : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Union[str, Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', 
dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
256
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ): if principal <= 0: raise Exception('''Principal borrowed must be > 0''' ) if rate_per_annum < 0: raise Exception('''Rate of interest must be >= 0''' ) if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ): raise Exception('''Years to repay must be an integer > 0''' ) # Yearly rate is divided by 12 to get monthly rate __SCREAMING_SNAKE_CASE : int = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly __SCREAMING_SNAKE_CASE : Union[str, Any] = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
696
0
'''simple docstring''' import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": lowerCAmelCase : Any = argparse.ArgumentParser( description=( 'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2']) parser.add_argument('--model_name', default='roberta-large', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCAmelCase : int = parser.parse_args() if args.model_type == "roberta": lowerCAmelCase : int = RobertaForMaskedLM.from_pretrained(args.model_name) lowerCAmelCase : int = 'roberta' elif args.model_type == "gpt2": lowerCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name) lowerCAmelCase : Optional[int] = 'transformer' lowerCAmelCase : str = model.state_dict() lowerCAmelCase : List[str] = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: lowerCAmelCase : Any = state_dict[f"""{prefix}.{param_name}"""] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: lowerCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight""" lowerCAmelCase : str = state_dict[param_name] for w in ["weight", "bias"]: lowerCAmelCase : List[Any] = f"""{prefix}.embeddings.LayerNorm.{w}""" lowerCAmelCase : str = state_dict[param_name] # Transformer Blocks # lowerCAmelCase : Any = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: lowerCAmelCase : int = state_dict[ f"""{prefix}.h.{teacher_idx}.{layer}.{w}""" ] lowerCAmelCase : Union[str, Any] = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""] else: for layer in [ 
"attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: lowerCAmelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}""" ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: lowerCAmelCase : Any = state_dict[f"""{layer}"""] if args.vocab_transform: for w in ["weight", "bias"]: lowerCAmelCase : List[str] = state_dict[f"""lm_head.dense.{w}"""] lowerCAmelCase : Any = state_dict[f"""lm_head.layer_norm.{w}"""] elif args.model_type == "gpt2": for w in ["weight", "bias"]: lowerCAmelCase : Dict = state_dict[f"""{prefix}.ln_f.{w}"""] lowerCAmelCase : Tuple = state_dict['lm_head.weight'] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
432
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ , A_ , A_ , A_ = None , )-> Optional[int]: '''simple docstring''' super().__init__() self.register_modules(transformer=A_ , vae=A_ , scheduler=A_ ) # create a imagenet -> id dictionary for easier use UpperCamelCase = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(',' ): UpperCamelCase = int(A_ ) UpperCamelCase = dict(sorted(self.labels.items() ) ) def UpperCAmelCase_ ( self , A_ )-> List[int]: '''simple docstring''' if not isinstance(A_ , A_ ): UpperCamelCase = list(A_ ) for l in label: if l not in self.labels: raise ValueError( F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , A_ , A_ = 4.0 , A_ = None , A_ = 50 , A_ = "pil" , A_ = True , )-> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' UpperCamelCase = len(A_ ) UpperCamelCase = self.transformer.config.sample_size UpperCamelCase = self.transformer.config.in_channels UpperCamelCase = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=A_ , device=self.device , dtype=self.transformer.dtype , ) UpperCamelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents UpperCamelCase = torch.tensor(A_ , device=self.device ).reshape(-1 ) UpperCamelCase = torch.tensor([1000] * batch_size , device=self.device ) UpperCamelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: 
UpperCamelCase = latent_model_input[: len(A_ ) // 2] UpperCamelCase = torch.cat([half, half] , dim=0 ) UpperCamelCase = self.scheduler.scale_model_input(A_ , A_ ) UpperCamelCase = t if not torch.is_tensor(A_ ): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) UpperCamelCase = latent_model_input.device.type == 'mps' if isinstance(A_ , A_ ): UpperCamelCase = torch.floataa if is_mps else torch.floataa else: UpperCamelCase = torch.intaa if is_mps else torch.intaa UpperCamelCase = torch.tensor([timesteps] , dtype=A_ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: UpperCamelCase = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCamelCase = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output UpperCamelCase = self.transformer( A_ , timestep=A_ , class_labels=A_ ).sample # perform guidance if guidance_scale > 1: UpperCamelCase , UpperCamelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] UpperCamelCase , UpperCamelCase = torch.split(A_ , len(A_ ) // 2 , dim=0 ) UpperCamelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps) UpperCamelCase = torch.cat([half_eps, half_eps] , dim=0 ) UpperCamelCase = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: UpperCamelCase , UpperCamelCase = torch.split(A_ , A_ , dim=1 ) else: UpperCamelCase = noise_pred # compute previous image: x_t -> x_t-1 UpperCamelCase = self.scheduler.step(A_ , A_ , A_ ).prev_sample if guidance_scale > 1: UpperCamelCase , UpperCamelCase = latent_model_input.chunk(2 , dim=0 ) else: UpperCamelCase = latent_model_input UpperCamelCase = 1 / self.vae.config.scaling_factor * latents UpperCamelCase = self.vae.decode(A_ ).sample UpperCamelCase = (samples / 2 + 0.5).clamp(0 , 1 ) # we always 
cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(A_ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=A_ )
432
1
'''simple docstring''' def a ( _UpperCAmelCase ) -> list: """simple docstring""" if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or x < 0 for x in sequence ): raise TypeError('Sequence must be list of non-negative integers' ) for _ in range(len(_UpperCAmelCase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(_UpperCAmelCase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
697
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "SCUT-DLVCLab/lilt-roberta-en-base": ( "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" ), } class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = "lilt" def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]: super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = classifier_dropout a_ = channel_shrink_ratio a_ = max_ad_position_embeddings
697
1
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self : Union[str, Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights lowercase__ = FlaxDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=lowerCamelCase, cache_dir=lowerCamelCase ) lowercase__ = [t[-1] for t in os.walk(os.path.join(lowerCamelCase, os.listdir(lowerCamelCase )[0], '''snapshots''' ) )] lowercase__ = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('''.bin''' ) for f in files ) @slow @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self : int ): '''simple docstring''' lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=lowerCamelCase ) lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 4 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(lowerCamelCase ) # shard inputs and rng lowercase__ = replicate(lowerCamelCase ) lowercase__ = 
jax.random.split(lowerCamelCase, lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3 assert np.abs(np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 49947.875 ) < 5E-1 lowercase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(lowerCamelCase ) == num_samples def lowercase__ ( self : Any ): '''simple docstring''' lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', revision='''flax''', safety_checker=lowerCamelCase ) lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(lowerCamelCase ) # shard inputs and rng lowercase__ = replicate(lowerCamelCase ) lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3 assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1 def lowercase__ ( self : int ): '''simple docstring''' lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase ) lowercase__ = ( '''A cinematic film 
still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(lowerCamelCase ) # shard inputs and rng lowercase__ = replicate(lowerCamelCase ) lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3 assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1 def lowercase__ ( self : Dict ): '''simple docstring''' lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa ) lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(lowerCamelCase ) # shard inputs and rng lowercase__ = replicate(lowerCamelCase ) lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3 assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1 def lowercase__ ( self 
: Optional[int] ): '''simple docstring''' lowercase__ = FlaxDDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', set_alpha_to_one=lowerCamelCase, steps_offset=1, ) lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, scheduler=lowerCamelCase, safety_checker=lowerCamelCase, ) lowercase__ = scheduler.create_state() lowercase__ = scheduler_state lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(lowerCamelCase ) # shard inputs and rng lowercase__ = replicate(lowerCamelCase ) lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3 assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1 def lowercase__ ( self : str ): '''simple docstring''' lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase ) lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase, ) lowercase__ = replicate(lowerCamelCase ) lowercase__ = 
pipeline.prepare_inputs(lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) lowercase__ = images[2, 0, 256, 10:17, 1] # With memory efficient attention lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase, use_memory_efficient_attention=lowerCamelCase, ) lowercase__ = replicate(lowerCamelCase ) lowercase__ = pipeline.prepare_inputs(lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) lowercase__ = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
700
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer A__ : Dict = logging.get_logger(__name__) A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} A__ : Optional[int] = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } A__ : List[str] = { 'bert-base-uncased': 5_12, 'bert-large-uncased': 5_12, 'bert-base-cased': 5_12, 'bert-large-cased': 5_12, 'bert-base-multilingual-uncased': 5_12, 'bert-base-multilingual-cased': 5_12, 'bert-base-chinese': 5_12, 'bert-base-german-cased': 5_12, 'bert-large-uncased-whole-word-masking': 5_12, 'bert-large-cased-whole-word-masking': 5_12, 'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12, 'bert-large-cased-whole-word-masking-finetuned-squad': 5_12, 'bert-base-cased-finetuned-mrpc': 5_12, 'bert-base-german-dbmdz-cased': 5_12, 'bert-base-german-dbmdz-uncased': 5_12, 'TurkuNLP/bert-base-finnish-cased-v1': 5_12, 'TurkuNLP/bert-base-finnish-uncased-v1': 5_12, 'wietsedv/bert-base-dutch-cased': 5_12, } A__ : Optional[int] = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': 
{'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class _UpperCAmelCase ( A__ ): """simple docstring""" lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = BertTokenizer def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ): '''simple docstring''' super().__init__( lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, ) lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars ): lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) ) lowercase__ = do_lower_case lowercase__ = strip_accents lowercase__ 
= tokenize_chinese_chars lowercase__ = normalizer_class(**lowerCamelCase ) lowercase__ = do_lower_case def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ): '''simple docstring''' lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ): '''simple docstring''' lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase ) return tuple(lowerCamelCase )
671
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''MobileViTFeatureExtractor'''] lowerCAmelCase__ = ['''MobileViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileViTForImageClassification''', '''MobileViTForSemanticSegmentation''', '''MobileViTModel''', '''MobileViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileViTForImageClassification''', '''TFMobileViTForSemanticSegmentation''', '''TFMobileViTModel''', '''TFMobileViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
83
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[Any] = ['pixel_values'] def __init__( self : List[str] , a : bool = True , a : Dict[str, int] = None , a : int = 0.9 , a : PILImageResampling = PILImageResampling.BICUBIC , a : bool = True , a : Dict[str, int] = None , a : Union[int, float] = 1 / 255 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : int , )-> None: """simple docstring""" super().__init__(**a ) lowercase__ = size if size is not None else {'shortest_edge': 224} lowercase__ = get_size_dict(a , default_to_square=a ) lowercase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowercase__ = get_size_dict(a , param_name='crop_size' ) lowercase__ = do_resize lowercase__ = size lowercase__ = crop_pct lowercase__ = resample lowercase__ = do_center_crop lowercase__ = crop_size lowercase__ = do_rescale lowercase__ = rescale_factor lowercase__ = do_normalize lowercase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowercase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[float] = None , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : 
Tuple , )-> np.ndarray: """simple docstring""" lowercase__ = get_size_dict(a , default_to_square=a ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) if crop_pct is not None: if "shortest_edge" in size: lowercase__ = int(size['shortest_edge'] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: lowercase__ = int(size['height'] / crop_pct ) else: lowercase__ = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct )) else: raise ValueError('Invalid size for resize: {}'.format(a ) ) lowercase__ = get_resize_output_image_size(a , size=a , default_to_square=a ) else: if "shortest_edge" in size: lowercase__ = get_resize_output_image_size(a , size=size['shortest_edge'] , default_to_square=a ) elif "height" in size and "width" in size: lowercase__ = (size['height'], size['width']) else: raise ValueError('Invalid size for resize: {}'.format(a ) ) return resize(a , size=a , resample=a , data_format=a , **a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , )-> np.ndarray: """simple docstring""" lowercase__ = get_size_dict(a ) if "height" not in size or "width" not in size: raise ValueError(f"""size must contain 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(a , size=(size['height'], size['width']) , data_format=a , **a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , )-> Optional[Any]: """simple docstring""" return rescale(a , scale=a , data_format=a , **a ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , )-> np.ndarray: """simple docstring""" return normalize(a , mean=a , std=a , data_format=a , **a ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : int = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Any , )-> PIL.Image.Image: """simple docstring""" lowercase__ = do_resize if do_resize is not None else self.do_resize lowercase__ = crop_pct if crop_pct is not None else self.crop_pct lowercase__ = resample if resample is not None else self.resample lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ = do_rescale if do_rescale is not None else self.do_rescale lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ = do_normalize if do_normalize is not None else self.do_normalize lowercase__ = image_mean if image_mean is not None else self.image_mean lowercase__ = image_std if image_std is not None else self.image_std lowercase__ = size if size is not None else self.size lowercase__ = get_size_dict(a , default_to_square=a ) lowercase__ = crop_size if crop_size is not None else self.crop_size 
lowercase__ = get_size_dict(a , param_name='crop_size' ) lowercase__ = make_list_of_images(a ) if not valid_images(a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_pct is None: raise ValueError('Crop_pct must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. lowercase__ = [to_numpy_array(a ) for image in images] if do_resize: lowercase__ = [self.resize(image=a , size=a , crop_pct=a , resample=a ) for image in images] if do_center_crop: lowercase__ = [self.center_crop(image=a , size=a ) for image in images] if do_rescale: lowercase__ = [self.rescale(image=a , scale=a ) for image in images] if do_normalize: lowercase__ = [self.normalize(image=a , mean=a , std=a ) for image in images] lowercase__ = [to_channel_dimension_format(a , a ) for image in images] lowercase__ = {'pixel_values': images} return BatchFeature(data=a , tensor_type=a )
235
0
"""simple docstring""" def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> bool: a_ : Dict = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ = 5_000 ) -> int: a_ : str = [(i * (3 * i - 1)) // 2 for i in range(1, SCREAMING_SNAKE_CASE__ )] for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE__ ): for j in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ): a_ : Dict = pentagonal_nums[j] a_ : List[Any] = pentagonal_i + pentagonal_j a_ : List[str] = pentagonal_j - pentagonal_i if is_pentagonal(SCREAMING_SNAKE_CASE__ ) and is_pentagonal(SCREAMING_SNAKE_CASE__ ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
713
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case_ ( a_ ): __lowerCAmelCase = ["image_processor", "tokenizer"] __lowerCAmelCase = "ViltImageProcessor" __lowerCAmelCase = ("BertTokenizer", "BertTokenizerFast") def __init__( self , a_=None , a_=None , **a_ ): a_ : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , a_ , ) a_ : List[Any] = kwargs.pop("feature_extractor" ) a_ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(a_ , a_ ) a_ : Dict = self.image_processor def __call__( self , a_ , a_ = None , a_ = True , a_ = False , a_ = None , a_ = None , a_ = 0 , a_ = None , a_ = None , a_ = None , a_ = False , a_ = False , a_ = False , a_ = False , a_ = True , a_ = None , **a_ , ): a_ : Union[str, Any] = self.tokenizer( text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , ) # add pixel_values + pixel_mask a_ : List[Any] = self.image_processor(a_ , return_tensors=a_ ) encoding.update(a_ ) return encoding def snake_case_ ( self , *a_ , **a_ ): return self.tokenizer.batch_decode(*a_ , **a_ ) def snake_case_ ( self , *a_ , **a_ ): return self.tokenizer.decode(*a_ , **a_ ) @property def snake_case_ ( self ): a_ : Union[str, Any] = 
self.tokenizer.model_input_names a_ : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def snake_case_ ( self ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , ) return self.image_processor_class @property def snake_case_ ( self ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , ) return self.image_processor
370
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowercase : List[str] = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys _lowercase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
210
'''simple docstring''' from ...configuration_utils import PretrainedConfig class A__ ( _snake_case ): lowercase = "bert-generation" def __init__( self , UpperCamelCase__=50358 , UpperCamelCase__=1024 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=4096 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-1_2 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__="absolute" , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = hidden_act A_ = intermediate_size A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = initializer_range A_ = layer_norm_eps A_ = position_embedding_type A_ = use_cache
288
0
import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    """Smoke tests for FlaxAutoModel loading, jitting and error reporting."""

    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            # block_until_ready forces the async jax computation to finish.
            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
713
"""Fast and slow tests for the AltDiffusion image-to-image pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    """CPU-sized smoke tests built from tiny dummy components."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        # Deterministic random image in [0, 1].
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        # Minimal stand-in for a feature extractor: returns an object with an
        # empty `pixel_values` tensor and a chainable `.to()`.
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Full-model integration test against a reference output image."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
441
0
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimal path sum from top-left to bottom-right of *matrix*,
    moving only right or down.

    The input matrix is left unmodified (the previous version overwrote it in
    place with the running path costs, mutating the caller's data).

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    >>> min_path_sum([[2]])
    2
    """
    # Work on a copy so the caller's matrix is not mutated.
    costs = [row[:] for row in matrix]

    # First row: a cell can only be reached from the left.
    for i in range(1, len(costs[0])):
        costs[0][i] += costs[0][i - 1]

    # First column: a cell can only be reached from above.
    for i in range(1, len(costs)):
        costs[i][0] += costs[i - 1][0]

    # Every other cell: cheapest of arriving from above or from the left.
    for i in range(1, len(costs)):
        for j in range(1, len(costs[0])):
            costs[i][j] += min(costs[i - 1][j], costs[i][j - 1])

    return costs[-1][-1]


# Backward-compatible alias for the previous (mangled) public name.
__SCREAMING_SNAKE_CASE = min_path_sum

if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
"""Processor class for Bark: pairs a text tokenizer with optional speaker-embedding voice presets."""
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    """Wraps an ``AutoTokenizer`` plus an optional dict of speaker-embedding
    voice presets that can be loaded from and saved to the Hub.
    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    # Expected ndarray rank for each component of a voice preset.
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        """Instantiate the processor, optionally loading the speaker-embeddings index json."""
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                # Missing embeddings are not fatal: the processor still works for plain text.
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        """Save the tokenizer and, if present, every voice preset as .npy files plus a json index."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        """Resolve and load the three .npy arrays of a named voice preset."""
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        # Each component must be an ndarray of the documented rank.
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        """Tokenize *text* and attach an optional voice preset as ``history_prompt``.

        ``voice_preset`` may be a dict of arrays, a preset name registered in
        ``self.speaker_embeddings``, or a path to an ``.npz`` file.
        """
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
655
0
"""Processor class for CLIPSeg: bundles a ViT image processor and a CLIP tokenizer."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    """Prepares text, images and/or visual prompts for the CLIPSeg model by
    dispatching to the wrapped tokenizer and image processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        # `feature_extractor` is only honoured as a deprecated fallback.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        """Encode the given inputs.

        Exactly one of ``text`` / ``visual_prompt`` may be provided; ``images``
        can be combined with either. The keys of the returned encoding depend
        on which inputs were given.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.')

        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
706
"""SentencePiece-based tokenizer for the BigBird model."""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/bigbird-roberta-base': 4096,
    'google/bigbird-roberta-large': 4096,
    'google/bigbird-base-trivia-itc': 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    """BigBird tokenizer backed by a SentencePiece model.

    Sequences are built as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with the SentencePiece model."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r' (\[(MASK|SEP)\])', r'\1', ' '.join(sub_texts))
        else:
            text = ''.join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Original spiece file is gone: re-serialize the in-memory model.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
338
0
"""Project Euler problem 6: difference between the square of the sum and the
sum of the squares of the first n natural numbers."""


def solution(n: int = 100) -> int:
    """Return ``(1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2)``.

    Uses the closed-form formulas with integer (floor) division so the result
    is exact for arbitrarily large *n* (the previous float division could lose
    precision and required a trailing int() cast).

    >>> solution(10)
    2640
    >>> solution()
    25164150
    """
    # n(n+1)(2n+1)/6 is always an integer, so // is exact.
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    # (n(n+1)/2)**2 — square of the n-th triangular number.
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


# Backward-compatible alias for the previous (mangled) public name.
__lowercase = solution

if __name__ == "__main__":
    print(f'''{solution() = }''')
150
"""AltCLIP model configuration.

Reconstructed from the mangled original, which (a) named all three classes
``SCREAMING_SNAKE_CASE`` so the internal references to ``AltCLIPTextConfig``
and ``AltCLIPVisionConfig`` raised NameError, and (b) bound every
hyper-parameter to a throwaway local (``__lowerCamelCase``) instead of
``self``, so configs silently lost all of their settings.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder (XLM-R style transformer)."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Store every hyper-parameter on the instance so serialization
        # (`to_dict` / `save_pretrained`) round-trips correctly.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # Dimension of the projection head on top of the text encoder.
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (CLIP-ViT style)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a full AltCLIP config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding an AltCLIP text and vision sub-config."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in
        # most cases, but we don't want to break anything regarding `_config_dict` that existed before.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Instantiate a composite config from already-built sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
150
1
"""Tests for the audio-classification pipeline.

Reconstructed from the mangled original in which every method was named
``a`` (so later definitions shadowed earlier ones) and every local was
bound to ``__lowerCAmelCase`` while the code referenced the real names
(``audio_classifier``, ``output``, ``EXPECTED_OUTPUT``, …) — NameErrors
throughout.
"""
import unittest

import numpy as np

from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_torchaudio,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline plus two raw-waveform examples for the common runner."""
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        # The pipeline also accepts a dict with an explicit sampling rate.
        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
330
"""Whisper model configuration and ONNX export configuration.

Reconstructed from the mangled original, which bound every hyper-parameter
to a throwaway local (``__lowerCAmelCase``) instead of ``self``, referenced
the nonexistent class ``OnnxSeqaSeqConfigWithPast`` (should be
``OnnxSeq2SeqConfigWithPast``), and named both classes ``_lowercase`` so
the second shadowed the first.
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# Token ids suppressed during generation (non-speech tokens).
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    """Configuration for the Whisper speech-to-text encoder-decoder model."""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],  # noqa: B006 — mutable default kept for config serialization parity
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for Whisper."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """Generate dummy encoder (audio) + decoder (token) inputs for ONNX tracing."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # With past key/values the decoder sequence is tied to the (halved) encoder length.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
330
1
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = StableDiffusionControlNetImgaImgPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} ) snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , 
cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowerCAmelCase = CLIPTextModel(_snake_case ) lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = 2 lowerCAmelCase = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ) lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((64, 64) ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': 
image, 'control_image': control_image, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class a ( a__ , a__ , unittest.TestCase ): snake_case__ = StableDiffusionControlNetImgaImgPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(_snake_case ): if isinstance(_snake_case , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(_snake_case ) torch.manual_seed(0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) 
controlneta.controlnet_down_blocks.apply(_snake_case ) torch.manual_seed(0 ) lowerCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowerCAmelCase = CLIPTextModel(_snake_case ) lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = 2 lowerCAmelCase = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ), ] lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = 
Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((64, 64) ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) lowerCAmelCase = 10.0 lowerCAmelCase = 4 lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**_snake_case )[0] lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" 
self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(_snake_case ) except NotImplementedError: pass @slow @require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' ) lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , safety_checker=_snake_case , controlnet=_snake_case ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) lowerCAmelCase = 'evil space-punk bird' lowerCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_12, 5_12) ) lowerCAmelCase = load_image( 'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_12, 5_12) ) lowerCAmelCase = pipe( _snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='np' , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) lowerCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' ) assert np.abs(expected_image - image ).max() < 9E-2
4
import requests

# Default Giphy API key; replace with a real key (https://developers.giphy.com/).
giphy_api_key = 'YOUR API KEY'


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """
    Query the Giphy search API and return the list of GIF URLs matching *query*.

    :param query: free-text search string; whitespace is collapsed into ``+``
        separators as the API expects.
    :param api_key: Giphy API key; defaults to the module-level placeholder.
    :return: list of ``url`` fields from the API's ``data`` array.

    NOTE(review): performs a network request; raises whatever ``requests``
    raises on connection failure, and ``KeyError`` if the response has no
    ``data`` field (e.g. an invalid API key).
    """
    # Giphy expects spaces encoded as '+' in the q= parameter.
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print('\n'.join(get_gifs('space ship')))
55
0
"""simple docstring""" from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class __A ( SCREAMING_SNAKE_CASE_ ): def __init__( self : List[Any] , __snake_case : CLIPSegForImageSegmentation , __snake_case : CLIPSegProcessor , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __snake_case : StableDiffusionSafetyChecker , __snake_case : CLIPImageProcessor , ) -> List[str]: super().__init__() if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1: __magic_name__: str = ( F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`' F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure ' """to update the config accordingly as leaving `steps_offset` might led to incorrect results""" """ in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub,""" """ it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`""" """ file""" ) deprecate("""steps_offset!=1""" , """1.0.0""" , __snake_case , standard_warn=__snake_case ) __magic_name__: Optional[int] = dict(scheduler.config ) __magic_name__: List[Any] = 1 __magic_name__: Any = FrozenDict(__snake_case ) if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False: __magic_name__: Tuple = ( F'The configuration file of this scheduler: {scheduler} has not set the configuration' """ `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make""" """ sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to""" """ incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face""" """ Hub, it would be very nice if you could open a Pull request for the""" """ `scheduler/scheduler_config.json` file""" ) deprecate("""skip_prk_steps not set""" , """1.0.0""" , __snake_case , standard_warn=__snake_case ) __magic_name__: Union[str, Any] = dict(scheduler.config ) __magic_name__: str = True __magic_name__: Dict = FrozenDict(__snake_case ) if safety_checker is None: logger.warning( F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure' """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. 
For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( segmentation_model=__snake_case , segmentation_processor=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , ) def lowerCamelCase__ ( self : Optional[int] , __snake_case : Optional[Union[str, int]] = "auto" ) -> Optional[int]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __magic_name__: Any = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__snake_case ) def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]: self.enable_attention_slicing(__snake_case ) def lowerCamelCase__ ( self : Any ) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) __magic_name__: List[Any] = torch.device("""cuda""" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__snake_case , __snake_case ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase__ ( self : int ) -> int: if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(__snake_case , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self : Optional[Any] , __snake_case : Union[str, List[str]] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : str , __snake_case : int = 
5_1_2 , __snake_case : int = 5_1_2 , __snake_case : int = 5_0 , __snake_case : float = 7.5 , __snake_case : Optional[Union[str, List[str]]] = None , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __snake_case : int = 1 , **__snake_case : Optional[int] , ) -> List[Any]: __magic_name__: int = self.segmentation_processor( text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device ) __magic_name__: Any = self.segmentation_model(**__snake_case ) __magic_name__: Any = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() __magic_name__: List[str] = self.numpy_to_pil(__snake_case )[0].resize(image.size ) # Run inpainting pipeline with the generated mask __magic_name__: int = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=__snake_case , image=__snake_case , mask_image=__snake_case , height=__snake_case , width=__snake_case , num_inference_steps=__snake_case , guidance_scale=__snake_case , negative_prompt=__snake_case , num_images_per_prompt=__snake_case , eta=__snake_case , generator=__snake_case , latents=__snake_case , output_type=__snake_case , return_dict=__snake_case , callback=__snake_case , callback_steps=__snake_case , )
715
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer __lowerCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCamelCase = { 'vocab_file': { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt', }, 'tokenizer_file': { 'unc-nlp/lxmert-base-uncased': ( 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json' ), }, } __lowerCamelCase = { 'unc-nlp/lxmert-base-uncased': 5_12, } __lowerCamelCase = { 'unc-nlp/lxmert-base-uncased': {'do_lower_case': True}, } class __A ( SCREAMING_SNAKE_CASE_ ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = LxmertTokenizer def __init__( self : Union[str, Any] , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : Tuple=True , __snake_case : Optional[int]="[UNK]" , __snake_case : Tuple="[SEP]" , __snake_case : int="[PAD]" , __snake_case : Optional[Any]="[CLS]" , __snake_case : int="[MASK]" , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=None , **__snake_case : List[str] , ) -> Optional[int]: super().__init__( __snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , tokenize_chinese_chars=__snake_case , strip_accents=__snake_case , **__snake_case , ) __magic_name__: List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __snake_case ) != do_lower_case or normalizer_state.get("""strip_accents""" , __snake_case ) != strip_accents or 
normalizer_state.get("""handle_chinese_chars""" , __snake_case ) != tokenize_chinese_chars ): __magic_name__: List[Any] = getattr(__snake_case , normalizer_state.pop("""type""" ) ) __magic_name__: Dict = do_lower_case __magic_name__: List[Any] = strip_accents __magic_name__: List[str] = tokenize_chinese_chars __magic_name__: Tuple = normalizer_class(**__snake_case ) __magic_name__: Tuple = do_lower_case def lowerCamelCase__ ( self : List[Any] , __snake_case : str , __snake_case : int=None ) -> List[str]: __magic_name__: int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : int , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]: __magic_name__: List[str] = [self.sep_token_id] __magic_name__: List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]: __magic_name__: List[str] = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case )
213
0
from __future__ import annotations


def A(resistors: list[float]) -> float:
    """
    Combined resistance of resistors connected in parallel: 1 / sum(1/R_i).

    :param resistors: resistance values; every value must be strictly positive
        (zero would divide by zero, negative resistance is rejected).
    :raises ValueError: on the first zero or negative value, naming its index.

    NOTE(review): this definition is immediately shadowed by the series
    version below, which carries the same (obfuscated) name ``A``; restore
    distinct names (e.g. ``resistor_parallel`` / ``resistor_series``) to make
    both reachable.
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def A(resistors: list[float]) -> float:  # noqa: F811 — keeps the original name
    """
    Combined resistance of resistors connected in series: sum(R_i).

    :param resistors: resistance values; negative values are rejected.
    :raises ValueError: on the first negative value, naming its index.
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. 
lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. 
if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
'''simple docstring''' import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase__ :Union[str, Any] = logging.getLogger(__name__) def __lowercase (_lowercase, _lowercase ) -> Any: """simple docstring""" __lowerCamelCase : Tuple = np.argmax(_lowercase, axis=1 ) return np.sum(outputs == labels ) def __lowercase (_lowercase ) -> Optional[int]: """simple docstring""" with open(_lowercase, encoding="""utf_8""" ) as f: __lowerCamelCase : Dict = csv.reader(_lowercase ) __lowerCamelCase : Any = [] next(_lowercase ) # skip the first line for line in tqdm(_lowercase ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def __lowercase (_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> Tuple: """simple docstring""" __lowerCamelCase : Dict = [] for dataset in encoded_datasets: __lowerCamelCase : int = len(_lowercase ) __lowerCamelCase : Any = np.zeros((n_batch, 2, input_len), dtype=np.intaa ) __lowerCamelCase : int = np.zeros((n_batch, 2), dtype=np.intaa ) __lowerCamelCase : int = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.intaa ) __lowerCamelCase : List[str] = np.zeros((n_batch,), dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(_lowercase ): __lowerCamelCase : List[str] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowerCamelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowerCamelCase : Tuple = with_conta 
__lowerCamelCase : str = with_conta __lowerCamelCase : List[Any] = len(_lowercase ) - 1 __lowerCamelCase : Tuple = len(_lowercase ) - 1 __lowerCamelCase : List[str] = with_conta __lowerCamelCase : List[str] = with_conta __lowerCamelCase : Dict = mc_label __lowerCamelCase : Optional[Any] = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(_lowercase ) for t in all_inputs ) ) return tensor_datasets def __lowercase () -> List[Any]: """simple docstring""" __lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument("""--model_name""", type=_lowercase, default="""openai-gpt""", help="""pretrained model name""" ) parser.add_argument("""--do_train""", action="""store_true""", help="""Whether to run training.""" ) parser.add_argument("""--do_eval""", action="""store_true""", help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""", default=_lowercase, type=_lowercase, required=_lowercase, help="""The output directory where the model predictions and checkpoints will be written.""", ) parser.add_argument("""--train_dataset""", type=_lowercase, default="""""" ) parser.add_argument("""--eval_dataset""", type=_lowercase, default="""""" ) parser.add_argument("""--seed""", type=_lowercase, default=42 ) parser.add_argument("""--num_train_epochs""", type=_lowercase, default=3 ) parser.add_argument("""--train_batch_size""", type=_lowercase, default=8 ) parser.add_argument("""--eval_batch_size""", type=_lowercase, default=16 ) parser.add_argument("""--adam_epsilon""", default=1e-8, type=_lowercase, help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""", type=_lowercase, default=1 ) parser.add_argument( """--max_steps""", default=-1, type=_lowercase, help=( """If > 0: set total number of training steps to perform. 
Override num_train_epochs.""" ), ) parser.add_argument( """--gradient_accumulation_steps""", type=_lowercase, default=1, help="""Number of updates steps to accumulate before performing a backward/update pass.""", ) parser.add_argument("""--learning_rate""", type=_lowercase, default=6.25e-5 ) parser.add_argument("""--warmup_steps""", default=0, type=_lowercase, help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--lr_schedule""", type=_lowercase, default="""warmup_linear""" ) parser.add_argument("""--weight_decay""", type=_lowercase, default=0.01 ) parser.add_argument("""--lm_coef""", type=_lowercase, default=0.9 ) parser.add_argument("""--n_valid""", type=_lowercase, default=374 ) parser.add_argument("""--server_ip""", type=_lowercase, default="""""", help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""", type=_lowercase, default="""""", help="""Can be used for distant debugging.""" ) __lowerCamelCase : List[Any] = parser.parse_args() print(_lowercase ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=_lowercase ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __lowerCamelCase : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __lowerCamelCase : int = torch.cuda.device_count() logger.info("""device: {}, n_gpu {}""".format(_lowercase, _lowercase ) ) if not args.do_train and not args.do_eval: raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new 
embeddings will be fine-tuned on the RocStories dataset __lowerCamelCase : Optional[Any] = ["""_start_""", """_delimiter_""", """_classify_"""] __lowerCamelCase : str = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(_lowercase ) __lowerCamelCase : Tuple = tokenizer.convert_tokens_to_ids(_lowercase ) __lowerCamelCase : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(_lowercase ) ) model.to(_lowercase ) # Load and encode the datasets def tokenize_and_encode(_lowercase ): if isinstance(_lowercase, _lowercase ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowercase ) ) elif isinstance(_lowercase, _lowercase ): return obj return [tokenize_and_encode(_lowercase ) for o in obj] logger.info("""Encoding dataset...""" ) __lowerCamelCase : Any = load_rocstories_dataset(args.train_dataset ) __lowerCamelCase : Union[str, Any] = load_rocstories_dataset(args.eval_dataset ) __lowerCamelCase : Union[str, Any] = (train_dataset, eval_dataset) __lowerCamelCase : List[str] = tokenize_and_encode(_lowercase ) # Compute the max input length for the Transformer __lowerCamelCase : Optional[Any] = model.config.n_positions // 2 - 2 __lowerCamelCase : int = max( len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __lowerCamelCase : Dict = min(_lowercase, model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __lowerCamelCase : Optional[int] = pre_process_datasets(_lowercase, _lowercase, _lowercase, *_lowercase ) __lowerCamelCase : List[Any] = tensor_datasets[0], tensor_datasets[1] __lowerCamelCase : List[str] = TensorDataset(*_lowercase ) __lowerCamelCase : Union[str, Any] = RandomSampler(_lowercase ) __lowerCamelCase : List[str] = DataLoader(_lowercase, sampler=_lowercase, batch_size=args.train_batch_size ) __lowerCamelCase : 
List[Any] = TensorDataset(*_lowercase ) __lowerCamelCase : Union[str, Any] = SequentialSampler(_lowercase ) __lowerCamelCase : Union[str, Any] = DataLoader(_lowercase, sampler=_lowercase, batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __lowerCamelCase : Union[str, Any] = args.max_steps __lowerCamelCase : Tuple = args.max_steps // (len(_lowercase ) // args.gradient_accumulation_steps) + 1 else: __lowerCamelCase : Optional[int] = len(_lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs __lowerCamelCase : Tuple = list(model.named_parameters() ) __lowerCamelCase : Optional[int] = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""] __lowerCamelCase : int = [ { """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], """weight_decay""": args.weight_decay, }, {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0}, ] __lowerCamelCase : Union[str, Any] = AdamW(_lowercase, lr=args.learning_rate, eps=args.adam_epsilon ) __lowerCamelCase : Any = get_linear_schedule_with_warmup( _lowercase, num_warmup_steps=args.warmup_steps, num_training_steps=_lowercase ) if args.do_train: __lowerCamelCase : Optional[int] = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ), desc="""Epoch""" ): __lowerCamelCase : Tuple = 0 __lowerCamelCase : Union[str, Any] = 0 __lowerCamelCase : Any = tqdm(_lowercase, desc="""Training""" ) for step, batch in enumerate(_lowercase ): __lowerCamelCase : Dict = tuple(t.to(_lowercase ) for t in batch ) __lowerCamelCase : Union[str, Any] = batch __lowerCamelCase : Union[str, Any] = model(_lowercase, mc_token_ids=_lowercase, lm_labels=_lowercase, mc_labels=_lowercase ) __lowerCamelCase : Optional[Any] = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __lowerCamelCase : Dict = ( loss.item() if exp_average_loss 
is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __lowerCamelCase : int = """Training loss: {:.2e} lr: {:.2e}""".format(_lowercase, scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __lowerCamelCase : int = model.module if hasattr(_lowercase, """module""" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __lowerCamelCase : str = os.path.join(args.output_dir, _lowercase ) __lowerCamelCase : List[Any] = os.path.join(args.output_dir, _lowercase ) torch.save(model_to_save.state_dict(), _lowercase ) model_to_save.config.to_json_file(_lowercase ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned __lowerCamelCase : Any = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __lowerCamelCase : int = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(_lowercase ) if args.do_eval: model.eval() __lowerCamelCase : Tuple = 0, 0 __lowerCamelCase : int = 0, 0 for batch in tqdm(_lowercase, desc="""Evaluating""" ): __lowerCamelCase : List[str] = tuple(t.to(_lowercase ) for t in batch ) __lowerCamelCase : int = batch with torch.no_grad(): __lowerCamelCase : Union[str, Any] = model( _lowercase, mc_token_ids=_lowercase, lm_labels=_lowercase, mc_labels=_lowercase ) __lowerCamelCase : str = mc_logits.detach().cpu().numpy() __lowerCamelCase : str = mc_labels.to("""cpu""" ).numpy() __lowerCamelCase : Optional[int] = accuracy(_lowercase, _lowercase ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __lowerCamelCase : List[Any] = eval_loss / nb_eval_steps __lowerCamelCase : str = eval_accuracy / nb_eval_examples __lowerCamelCase : List[Any] = tr_loss / nb_tr_steps if args.do_train else None __lowerCamelCase : str = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, 
"""train_loss""": train_loss} __lowerCamelCase : Optional[Any] = os.path.join(args.output_dir, """eval_results.txt""" ) with open(_lowercase, """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""", _lowercase, str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) if __name__ == "__main__": main()
701
'''simple docstring'''
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """
    Highest Response Ratio Next (HRRN) scheduling.

    Repeatedly picks, among the arrived and unfinished processes, the one with
    the highest response ratio (waiting + burst) / burst and runs it to
    completion, accumulating each process's turn-around time.

    :param process_name: process labels (kept aligned with the other lists).
    :param arrival_time: arrival times; NOTE this list is sorted IN PLACE.
    :param burst_time: CPU burst per process.
    :param no_of_process: number of processes.
    :return: turn-around time per process, in arrival order.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort all three lists by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # First unfinished process; if it has not arrived yet, fast-forward.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time and advance the clock.
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Waiting time per process: turn-around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
            f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
        )
    print(f'''average waiting time : {mean(waiting_time):.5f}''')
    print(f'''average turn around time : {mean(turn_around_time):.5f}''')
483
0
"""Pure-Python implementation of the MD5 hash algorithm (RFC 1321).

Bit strings are represented as ASCII b"0"/b"1" bytes for readability;
use ``hashlib`` for anything performance- or security-sensitive.
"""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Byte-swap a 32-char bit string from big- to little-endian order.

    Raises ValueError if the input is not exactly 32 chars long.
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian lowercase hex representation of a 32-bit int."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]  # keep only the low 32 bits
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """MD5 padding: append a 1-bit, zero-pad to 448 mod 512, append the length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    # Original message length in bits, captured before padding.
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block as a list of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by ``shift`` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as 32 lowercase hex bytes."""
    bit_string = preprocess(message)

    # K-table: binary integer parts of abs(sin(i+1)) scaled by 2**32 (RFC 1321).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Per-round rotation amounts.
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
316
'''simple docstring''' from pathlib import Path import fire def lowerCamelCase_ ( A_ , A_ , A_ ): __lowerCamelCase = Path(A_ ) __lowerCamelCase = Path(A_ ) dest_dir.mkdir(exist_ok=A_ ) for path in src_dir.iterdir(): __lowerCamelCase = [x.rstrip() for x in list(path.open().readlines() )][:n] __lowerCamelCase = dest_dir.joinpath(path.name ) print(A_ ) dest_path.open('''w''' ).write('''\n'''.join(A_ ) ) if __name__ == "__main__": fire.Fire(minify)
316
1
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A = TypeVar("""KEY""") __A = TypeVar("""VAL""") @dataclass(frozen=__a , slots=__a ) class _lowerCAmelCase ( Generic[KEY, VAL] ): """simple docstring""" __magic_name__ :KEY __magic_name__ :VAL class _lowerCAmelCase ( _Item ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) def __bool__( self ): '''simple docstring''' return False __A = _DeletedItem() class _lowerCAmelCase ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.75 ): '''simple docstring''' lowerCAmelCase__ :Any = initial_block_size lowerCAmelCase__ :List[Any] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ :str = capacity_factor lowerCAmelCase__ :int = 0 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return hash(lowerCAmelCase_ ) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = self._buckets[ind] if not stored: lowerCAmelCase__ :List[str] = _Item(lowerCAmelCase_ , lowerCAmelCase_ ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ :Any = _Item(lowerCAmelCase_ , lowerCAmelCase_ ) return True else: return False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCAmelCase_ ) def snake_case ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ :Optional[Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def snake_case ( self , __UpperCAmelCase ): 
'''simple docstring''' lowerCAmelCase__ :List[Any] = self._buckets lowerCAmelCase__ :List[Any] = [None] * new_size lowerCAmelCase__ :Dict = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = self._get_bucket_index(lowerCAmelCase_ ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ :str = self._get_next_ind(lowerCAmelCase_ ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase_ ): if self._try_set(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(lowerCAmelCase_ , lowerCAmelCase_ ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase_ ): lowerCAmelCase__ :List[Any] = self._buckets[ind] if item is None: raise KeyError(lowerCAmelCase_ ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ :List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase_ ): lowerCAmelCase__ :Any = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCAmelCase_ ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' lowerCAmelCase__ :str = ' ,'.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return 
F"HashMap({val_string})"
719
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable __A = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""GPTNeoXTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXForCausalLM""", """GPTNeoXForQuestionAnswering""", """GPTNeoXForSequenceClassification""", """GPTNeoXForTokenClassification""", """GPTNeoXLayer""", """GPTNeoXModel""", """GPTNeoXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
560
0
"""Zero-shot audio classification pipeline (CLAP-style audio/text matching)."""
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Score an audio clip against free-text candidate labels (PyTorch only)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        """Classify the audio(s) given as inputs against ``candidate_labels``."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Split caller kwargs into (preprocess, forward, postprocess) params.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        # Highest score first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
489
'''simple docstring''' from __future__ import annotations from functools import lru_cache from math import ceil _lowercase = 100 _lowercase = set(range(3, NUM_PRIMES, 2)) primes.add(2) _lowercase = 42 for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def __UpperCamelCase ( a : int ) ->set[int]: if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} snake_case = set() snake_case = 42 snake_case = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def __UpperCamelCase ( a : int = 5000 ) ->int | None: for number_to_partition in range(1 , a ): if len(partition(a ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f'{solution() = }')
342
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase__ = { "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], "tokenization_roc_bert": ["RoCBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
713
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class a : _snake_case : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) _snake_case : bool = field(default=lowerCAmelCase_ , metadata={'help': 'Whether tp freeze the encoder.'} ) _snake_case : bool = field(default=lowerCAmelCase_ , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class a : _snake_case : str = field( metadata={'help': 'The input data dir. 
Should contain the .tsv files (or other data files) for the task.'} ) _snake_case : Optional[str] = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) _snake_case : Optional[int] = field( default=10_24 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _snake_case : Optional[int] = field( default=1_28 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _snake_case : Optional[int] = field( default=1_42 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) _snake_case : Optional[int] = field( default=1_42 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _snake_case : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) _snake_case : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) _snake_case : Optional[int] = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) _snake_case : Optional[str] = field(default=lowerCAmelCase_ , metadata={'help': 'Source language id for translation.'} ) _snake_case : Optional[str] = field(default=lowerCAmelCase_ , metadata={'help': 'Target language id for translation.'} ) _snake_case : Optional[int] = field(default=lowerCAmelCase_ , metadata={'help': '# num_beams to use for evaluation.'} ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(lowercase ,os.path.join(lowercase ,f'''{split}_results.json''' ) ) def __UpperCAmelCase ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() check_output_dir(lowercase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,training_args.fpaa ,) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" ,lowercase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,) _UpperCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(lowercase ,lowercase ,lowercase ): assert hasattr(lowercase ,lowercase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(lowercase ,lowercase ,getattr(lowercase ,lowercase ) ) _UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,) _UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path ,from_tf=""".ckpt""" in model_args.model_name_or_path ,config=lowercase ,cache_dir=model_args.cache_dir ,) # use task specific params use_task_specific_params(lowercase ,data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _UpperCAmelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase ,(MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase ,lowercase ): _UpperCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _UpperCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _UpperCAmelCase = SeqaSeqDataset # Get datasets _UpperCAmelCase = ( dataset_class( lowercase ,type_path="""train""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_train ,max_target_length=data_args.max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,) if 
training_args.do_train else None ) _UpperCAmelCase = ( dataset_class( lowercase ,type_path="""val""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_val ,max_target_length=data_args.val_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _UpperCAmelCase = ( dataset_class( lowercase ,type_path="""test""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_test ,max_target_length=data_args.test_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,) if training_args.do_predict else None ) # Initialize our Trainer _UpperCAmelCase = ( build_compute_metrics_fn(data_args.task ,lowercase ) if training_args.predict_with_generate else None ) _UpperCAmelCase = SeqaSeqTrainer( model=lowercase ,args=lowercase ,data_args=lowercase ,train_dataset=lowercase ,eval_dataset=lowercase ,data_collator=SeqaSeqDataCollator( lowercase ,lowercase ,model.config.decoder_start_token_id ,training_args.tpu_num_cores ) ,compute_metrics=lowercase ,tokenizer=lowercase ,) _UpperCAmelCase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) _UpperCAmelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" ,lowercase ,training_args.output_dir ) all_metrics.update(lowercase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir ,"""trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _UpperCAmelCase = trainer.evaluate(metric_key_prefix="""val""" ) _UpperCAmelCase = data_args.n_val _UpperCAmelCase = round(metrics["""val_loss"""] ,4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" ,lowercase ,training_args.output_dir ) all_metrics.update(lowercase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _UpperCAmelCase = trainer.predict(test_dataset=lowercase ,metric_key_prefix="""test""" ) _UpperCAmelCase = test_output.metrics _UpperCAmelCase = data_args.n_test if trainer.is_world_process_zero(): _UpperCAmelCase = round(metrics["""test_loss"""] ,4 ) handle_metrics("""test""" ,lowercase ,training_args.output_dir ) all_metrics.update(lowercase ) if training_args.predict_with_generate: _UpperCAmelCase = tokenizer.batch_decode( test_output.predictions ,skip_special_tokens=lowercase ,clean_up_tokenization_spaces=lowercase ) _UpperCAmelCase = lmap(str.strip ,lowercase ) write_txt_file(lowercase ,os.path.join(training_args.output_dir ,"""test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(lowercase ,os.path.join(training_args.output_dir ,"""all_results.json""" ) ) return all_metrics def __UpperCAmelCase ( lowercase ): """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
275
0
"""Verify that each config-class attribute is used somewhere in the matching modeling files."""
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Config class -> attributes that are legitimately unused in the modeling files.
# `True` allows every attribute of that class.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)


def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if any name in ``attributes`` is used in ``source_strings``
    or is otherwise allowed (common attrs, `_token_id` suffix, special cases)."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return the sorted list of __init__ parameters of ``config_class`` that
    appear unused in that model's ``modeling_*`` source files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    """Run the unused-attribute check for every registered config class and
    raise ValueError listing the offenders, if any."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
342
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self ) -> Any: snake_case = '''ylacombe/bark-small''' snake_case = tempfile.mkdtemp() snake_case = '''en_speaker_1''' snake_case = '''This is a test string''' snake_case = '''speaker_embeddings_path.json''' snake_case = '''speaker_embeddings''' def UpperCamelCase ( self , **A__ ) -> int: return AutoTokenizer.from_pretrained(self.checkpoint , **A__ ) def UpperCamelCase ( self ) -> Tuple: shutil.rmtree(self.tmpdirname ) def UpperCamelCase ( self ) -> List[Any]: snake_case = self.get_tokenizer() snake_case = BarkProcessor(tokenizer=A__ ) processor.save_pretrained(self.tmpdirname ) snake_case = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCamelCase ( self ) -> Tuple: snake_case = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) snake_case = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCamelCase ( self ) -> List[Any]: snake_case = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case = 35 snake_case = 2 snake_case = 8 snake_case = { '''semantic_prompt''': 
np.ones(A__ ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case = processor(text=self.input_string , voice_preset=A__ ) snake_case = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(A__ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(A__ , **A__ ) snake_case = processor(text=self.input_string , voice_preset=A__ ) snake_case = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(A__ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case = processor(text=self.input_string , voice_preset=self.voice_preset ) def UpperCamelCase ( self ) -> int: snake_case = self.get_tokenizer() snake_case = BarkProcessor(tokenizer=A__ ) snake_case = processor(text=self.input_string ) snake_case = tokenizer( self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=A__ , return_attention_mask=A__ , return_token_type_ids=A__ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
342
1
"""simple docstring""" import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = (CMStochasticIterativeScheduler,) snake_case__ : str = 10 def UpperCAmelCase_ ( self : Any , **UpperCAmelCase__ : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "num_train_timesteps": 2_0_1, "sigma_min": 0.002, "sigma_max": 80.0, } config.update(**UpperCAmelCase__ ) return config def UpperCAmelCase_ ( self : List[str] ) -> str: __SCREAMING_SNAKE_CASE = 1_0 __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = self.scheduler_classes[0](**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = scheduler.timesteps[0] __SCREAMING_SNAKE_CASE = scheduler.timesteps[1] __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample __SCREAMING_SNAKE_CASE = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Any: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 1 scheduler.set_timesteps(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = scheduler.timesteps __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = self.dummy_model() 
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCAmelCase__ ): # 1. scale model input __SCREAMING_SNAKE_CASE = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) # 2. predict noise residual __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , UpperCAmelCase__ ) # 3. predict previous sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(UpperCAmelCase__ ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1E-2 assert abs(result_mean.item() - 0.2_510 ) < 1E-3 def UpperCAmelCase_ ( self : Any ) -> Tuple: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [1_0_6, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = scheduler.timesteps __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __SCREAMING_SNAKE_CASE = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) # 2. predict noise residual __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , UpperCAmelCase__ ) # 3. 
predict previous sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(UpperCAmelCase__ ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1E-2 assert abs(result_mean.item() - 0.4_527 ) < 1E-3 def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [3_9, 3_0, 1_2, 1_5, 0] with self.assertRaises(UpperCAmelCase__ , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [3_9, 3_0, 1_2, 1, 0] __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) with self.assertRaises(UpperCAmelCase__ , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase__ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
553
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor a__ : Dict = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Tuple ) -> None: warnings.warn( "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use VideoMAEImageProcessor instead." , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
553
1
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A (__UpperCAmelCase ,unittest.TestCase ): _SCREAMING_SNAKE_CASE = XLNetTokenizer _SCREAMING_SNAKE_CASE = XLNetTokenizerFast _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True def __a ( self ) -> Tuple: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _snake_case : Optional[int] = XLNetTokenizer(lowercase_ , keep_accents=lowercase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __a ( self ) -> Optional[int]: '''simple docstring''' _snake_case : Union[str, Any] = '''<s>''' _snake_case : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def __a ( self ) -> Optional[Any]: '''simple docstring''' _snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<eod>''' ) self.assertEqual(len(lowercase_ ) , 1006 ) def __a ( self ) -> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __a ( self ) -> int: '''simple docstring''' _snake_case : Optional[int] = XLNetTokenizer(lowercase_ , keep_accents=lowercase_ ) _snake_case : Tuple = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] ) _snake_case : Dict = tokenizer.tokenize('''I was born in 92000, 
and this is falsé.''' ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) _snake_case : Tuple = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' _snake_case : Optional[int] = XLNetTokenizer(lowercase_ , do_lower_case=lowercase_ ) _snake_case : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] ) def __a ( self ) -> str: '''simple docstring''' _snake_case : Union[str, Any] = XLNetTokenizer(lowercase_ , do_lower_case=lowercase_ 
) _snake_case : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def __a ( self ) -> Union[str, Any]: '''simple docstring''' _snake_case : List[Any] = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' ) _snake_case : int = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase_ ) _snake_case : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase_ ) _snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ ) _snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __a ( self ) -> Any: '''simple docstring''' _snake_case : Any = {'''input_ids''': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
326
import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class A (__UpperCAmelCase ,unittest.TestCase ): _SCREAMING_SNAKE_CASE = MvpTokenizer _SCREAMING_SNAKE_CASE = MvpTokenizerFast _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = filter_roberta_detectors def __a ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() _snake_case : Union[str, Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _snake_case : Tuple = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) _snake_case : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _snake_case : str = {'''unk_token''': '''<unk>'''} _snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase_ ) ) def __a ( self , **lowercase_ ) -> List[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __a ( self , **lowercase_ ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , 
**lowercase_ ) def __a ( self , lowercase_ ) -> Any: '''simple docstring''' return "lower newer", "lower newer" @cached_property def __a ( self ) -> int: '''simple docstring''' return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' ) @cached_property def __a ( self ) -> str: '''simple docstring''' return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' ) @require_torch def __a ( self ) -> List[str]: '''simple docstring''' _snake_case : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _snake_case : Optional[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Optional[int] = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors='''pt''' ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _snake_case : int = batch.input_ids.tolist()[0] self.assertListEqual(lowercase_ , lowercase_ ) # Test that special tokens are reset @require_torch def __a ( self ) -> Optional[int]: '''simple docstring''' _snake_case : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : str = tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''' ) # check if input_ids are returned and no labels self.assertIn('''input_ids''' , lowercase_ ) self.assertIn('''attention_mask''' , lowercase_ ) self.assertNotIn('''labels''' , lowercase_ ) self.assertNotIn('''decoder_attention_mask''' , lowercase_ ) @require_torch def __a ( self ) -> Union[str, Any]: '''simple docstring''' _snake_case : Tuple = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : List[str] = tokenizer(text_target=lowercase_ , max_length=32 , 
padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def __a ( self ) -> Tuple: '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Union[str, Any] = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=lowercase_ , truncation=lowercase_ , return_tensors='''pt''' ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def __a ( self ) -> int: '''simple docstring''' _snake_case : Dict = ['''A long paragraph for summarization.'''] _snake_case : List[str] = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Dict = tokenizer(lowercase_ , text_target=lowercase_ , return_tensors='''pt''' ) _snake_case : List[Any] = inputs['''input_ids'''] _snake_case : Dict = inputs['''labels'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def __a ( self ) -> List[Any]: '''simple docstring''' pass def __a ( self ) -> List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case : Dict = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) _snake_case : str = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) _snake_case : Optional[Any] = '''A, <mask> AllenNLP sentence.''' _snake_case : Optional[Any] = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) _snake_case : str = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , 
return_token_type_ids=lowercase_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _snake_case : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _snake_case : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( lowercase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowercase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
326
1
from collections.abc import Callable import numpy as np def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): a__ = int(np.ceil((x_end - xa) / step_size ) ) a__ = np.zeros((n + 1,) ) a__ = ya a__ = xa for k in range(__UpperCAmelCase ): a__ = y[k] + step_size * ode_func(__UpperCAmelCase , y[k] ) a__ = y[k] + ( (step_size / 2) * (ode_func(__UpperCAmelCase , y[k] ) + ode_func(x + step_size , __UpperCAmelCase )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
148
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a_ : Any = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __UpperCamelCase : """simple docstring""" _lowercase : str = PegasusConfig _lowercase : int = {} _lowercase : List[Any] = '''gelu''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1_3 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=9_9 , SCREAMING_SNAKE_CASE=3_2 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3_7 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=2_0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , ) -> int: a__ = parent a__ = batch_size a__ = seq_length a__ = is_training a__ = use_labels a__ = vocab_size a__ = hidden_size a__ = num_hidden_layers a__ = num_attention_heads a__ = intermediate_size a__ = hidden_dropout_prob a__ = attention_probs_dropout_prob a__ = max_position_embeddings a__ = eos_token_id a__ = pad_token_id a__ = bos_token_id def _UpperCAmelCase ( self ) -> Dict: a__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) a__ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) a__ = np.concatenate([input_ids, eos_tensor] , axis=1 ) a__ = ids_tensor([self.batch_size, self.seq_length] , 
self.vocab_size ) a__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) a__ = prepare_pegasus_inputs_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return config, inputs_dict def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: a__ = 2_0 a__ = model_class_name(SCREAMING_SNAKE_CASE ) a__ = model.encode(inputs_dict['''input_ids'''] ) a__ , a__ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) a__ = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) a__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) a__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) a__ = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , decoder_position_ids=SCREAMING_SNAKE_CASE , ) a__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) a__ = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE , ) a__ = model.decode(SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE ) a__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" ) def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: a__ = 2_0 a__ = model_class_name(SCREAMING_SNAKE_CASE ) a__ = model.encode(inputs_dict['''input_ids'''] ) a__ , a__ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) a__ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) a__ = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) a__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) a__ = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , decoder_position_ids=SCREAMING_SNAKE_CASE , ) a__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) a__ = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE , decoder_position_ids=SCREAMING_SNAKE_CASE , ) a__ = model.decode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE ) a__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" ) def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , ): if attention_mask is None: a__ = np.not_equal(__UpperCAmelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: a__ = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , 
dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __UpperCamelCase ( _lowercase , unittest.TestCase ): """simple docstring""" _lowercase : Optional[int] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _lowercase : List[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () _lowercase : int = True _lowercase : Optional[int] = False _lowercase : Optional[int] = False _lowercase : Union[str, Any] = False def _UpperCAmelCase ( self ) -> Optional[Any]: a__ = FlaxPegasusModelTester(self ) a__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self ) -> int: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> str: a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self ) -> List[Any]: a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self ) -> int: a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): a__ = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) a__ = model_class(SCREAMING_SNAKE_CASE ) @jax.jit def encode_jitted(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ): return model.encode(input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) with 
self.subTest('''JIT Enabled''' ): a__ = encode_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): a__ = encode_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) def _UpperCAmelCase ( self ) -> int: a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): a__ = model_class(SCREAMING_SNAKE_CASE ) a__ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) a__ = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return model.decode( decoder_input_ids=SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , encoder_outputs=SCREAMING_SNAKE_CASE , ) with self.subTest('''JIT Enabled''' ): a__ = decode_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): a__ = decode_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _UpperCAmelCase ( self ) -> List[Any]: for model_class_name in self.all_model_classes: a__ = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=SCREAMING_SNAKE_CASE ) a__ = np.ones((1, 1) ) a__ = model(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @slow def _UpperCAmelCase ( self ) -> int: a__ = 
FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' ) a__ = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' ) a__ = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about 
their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] a__ = [ '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''', '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''', ] a__ = tokenizer(SCREAMING_SNAKE_CASE , return_tensors='''np''' , truncation=SCREAMING_SNAKE_CASE , max_length=5_1_2 , padding=SCREAMING_SNAKE_CASE ) a__ = model.generate(**SCREAMING_SNAKE_CASE , num_beams=2 ).sequences a__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) assert tgt_text == decoded
148
1
'''simple docstring''' def A_ ( snake_case ): SCREAMING_SNAKE_CASE:List[str] = [0 for i in range(len(lowerCAmelCase__ ) )] # initialize interval's left pointer and right pointer SCREAMING_SNAKE_CASE:int = 0, 0 for i in range(1 , len(lowerCAmelCase__ ) ): # case when current index is inside the interval if i <= right_pointer: SCREAMING_SNAKE_CASE:List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) SCREAMING_SNAKE_CASE:List[str] = min_edge while go_next(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: SCREAMING_SNAKE_CASE:str = i, i + z_result[i] - 1 return z_result def A_ ( snake_case , snake_case , snake_case ): return i + z_result[i] < len(lowerCAmelCase__ ) and s[z_result[i]] == s[i + z_result[i]] def A_ ( snake_case , snake_case ): SCREAMING_SNAKE_CASE:List[Any] = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string SCREAMING_SNAKE_CASE:List[str] = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(lowerCAmelCase__ ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
143
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __UpperCAmelCase : def UpperCAmelCase ( self : List[str] ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) a__ : List[str] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) a__ : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) a__ : Any = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) a__ : str = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) a__ : Optional[int] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' 
torch.manual_seed(0 ) a__ : List[Any] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) a__ : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) a__ : Dict = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) a__ : Optional[int] = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) a__ : Tuple = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) a__ : Optional[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase ( self : List[Any] ) -> Tuple: '''simple docstring''' a__ : Dict = self.get_dummy_components() a__ : Any = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) a__ : Any = self.get_dummy_inputs(a_ ) a__ : 
Optional[int] = inputs["prompt"] a__ : List[Any] = inputs["generator"] a__ : Optional[int] = inputs["num_inference_steps"] a__ : Any = inputs["output_type"] if "image" in inputs: a__ : Any = inputs["image"] else: a__ : Dict = None if "mask_image" in inputs: a__ : Optional[int] = inputs["mask_image"] else: a__ : Any = None if "original_image" in inputs: a__ : List[Any] = inputs["original_image"] else: a__ : str = None a__ , a__ : Optional[int] = pipe.encode_prompt(a_ ) # inputs with prompt converted to embeddings a__ : Union[str, Any] = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: a__ : Dict = image if mask_image is not None: a__ : Any = mask_image if original_image is not None: a__ : Optional[int] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(a_ , a_ , a_ ) a__ : int = pipe(**a_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(a_ ) a__ : List[str] = self.pipeline_class.from_pretrained(a_ ) pipe_loaded.to(a_ ) pipe_loaded.set_progress_bar_config(disable=a_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(a_ , a_ ) is None , F"`{optional_component}` did not stay set to None after loading." 
, ) a__ : Union[str, Any] = self.get_dummy_inputs(a_ ) a__ : str = inputs["generator"] a__ : Dict = inputs["num_inference_steps"] a__ : Optional[int] = inputs["output_type"] # inputs with prompt converted to embeddings a__ : List[Any] = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: a__ : Dict = image if mask_image is not None: a__ : Any = mask_image if original_image is not None: a__ : Dict = original_image a__ : Optional[Any] = pipe_loaded(**a_ )[0] a__ : int = np.abs(to_np(a_ ) - to_np(a_ ) ).max() self.assertLess(a_ , 1E-4 ) def UpperCAmelCase ( self : int ) -> Any: '''simple docstring''' a__ : Dict = self.get_dummy_components() a__ : Dict = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) a__ : List[str] = self.get_dummy_inputs(a_ ) a__ : Dict = pipe(**a_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(a_ ) a__ : str = self.pipeline_class.from_pretrained(a_ ) pipe_loaded.to(a_ ) pipe_loaded.set_progress_bar_config(disable=a_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests a__ : Optional[int] = self.get_dummy_inputs(a_ ) a__ : Optional[int] = pipe_loaded(**a_ )[0] a__ : List[Any] = np.abs(to_np(a_ ) - to_np(a_ ) ).max() self.assertLess(a_ , 1E-4 )
642
0
"""simple docstring""" import operator as op def _lowerCAmelCase ( lowerCamelCase__ : Tuple ) -> List[str]: _SCREAMING_SNAKE_CASE : Optional[int] = [] _SCREAMING_SNAKE_CASE : str = lambda lowerCamelCase__, lowerCamelCase__ : int(x / y ) # noqa: E731 integer division operation _SCREAMING_SNAKE_CASE : Any = { "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ), "Action".center(1_2 ), "Stack", sep=" | " ) print("-" * (3_0 + len(lowerCamelCase__ )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(lowerCamelCase__ ) # append x to stack # output in tabular format print(x.rjust(8 ), ("push(" + x + ")").ljust(1_2 ), ",".join(lowerCamelCase__ ), sep=" | " ) else: _SCREAMING_SNAKE_CASE : Dict = stack.pop() # pop stack # output in tabular format print("".rjust(8 ), ("pop(" + b + ")").ljust(1_2 ), ",".join(lowerCamelCase__ ), sep=" | " ) _SCREAMING_SNAKE_CASE : Any = stack.pop() # pop stack # output in tabular format print("".rjust(8 ), ("pop(" + a + ")").ljust(1_2 ), ",".join(lowerCamelCase__ ), sep=" | " ) stack.append( str(opr[x](int(lowerCamelCase__ ), int(lowerCamelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ), ("push(" + a + x + b + ")").ljust(1_2 ), ",".join(lowerCamelCase__ ), sep=" | ", ) return int(stack[0] ) if __name__ == "__main__": lowercase_ : int = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''') print('''\n\tResult = ''', solve(Postfix))
295
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCamelCase ( __SCREAMING_SNAKE_CASE ): A__ = ["""image_processor""", """tokenizer"""] A__ = """AutoImageProcessor""" A__ = """AutoTokenizer""" def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , snake_case__ , ) _SCREAMING_SNAKE_CASE : Any = kwargs.pop("feature_extractor" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(snake_case__ , snake_case__ ) _SCREAMING_SNAKE_CASE : Any = self.image_processor _SCREAMING_SNAKE_CASE : Any = False def __call__( self , *snake_case__ , **snake_case__ ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*snake_case__ , **snake_case__ ) _SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("images" , snake_case__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("text" , snake_case__ ) if len(snake_case__ ) > 0: _SCREAMING_SNAKE_CASE : Optional[Any] = args[0] _SCREAMING_SNAKE_CASE : List[str] = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(snake_case__ , *snake_case__ , **snake_case__ ) if text is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(snake_case__ , **snake_case__ ) if text is None: return inputs elif images is None: return encodings else: _SCREAMING_SNAKE_CASE : List[Any] = encodings["input_ids"] return inputs def __SCREAMING_SNAKE_CASE ( self , *snake_case__ , **snake_case__ ): """simple docstring""" return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def __SCREAMING_SNAKE_CASE ( self , *snake_case__ , **snake_case__ ): """simple docstring""" return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @contextmanager def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." 
) _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : str = self.tokenizer yield _SCREAMING_SNAKE_CASE : str = self.image_processor _SCREAMING_SNAKE_CASE : int = False def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__=False , snake_case__=None ): """simple docstring""" if added_vocab is None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.get_added_vocab() _SCREAMING_SNAKE_CASE : int = {} while tokens: _SCREAMING_SNAKE_CASE : List[Any] = re.search(r"<s_(.*?)>" , snake_case__ , re.IGNORECASE ) if start_token is None: break _SCREAMING_SNAKE_CASE : List[Any] = start_token.group(1 ) _SCREAMING_SNAKE_CASE : Any = re.search(rF'''</s_{key}>''' , snake_case__ , re.IGNORECASE ) _SCREAMING_SNAKE_CASE : Tuple = start_token.group() if end_token is None: _SCREAMING_SNAKE_CASE : Optional[int] = tokens.replace(snake_case__ , "" ) else: _SCREAMING_SNAKE_CASE : Any = end_token.group() _SCREAMING_SNAKE_CASE : Union[str, Any] = re.escape(snake_case__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = re.escape(snake_case__ ) _SCREAMING_SNAKE_CASE : str = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , snake_case__ , re.IGNORECASE ) if content is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _SCREAMING_SNAKE_CASE : Dict = self.tokenajson(snake_case__ , is_inner_value=snake_case__ , added_vocab=snake_case__ ) if value: if len(snake_case__ ) == 1: _SCREAMING_SNAKE_CASE : Optional[Any] = value[0] _SCREAMING_SNAKE_CASE : Dict = value else: # leaf nodes _SCREAMING_SNAKE_CASE : Dict = [] for leaf in content.split(r"<sep/>" ): _SCREAMING_SNAKE_CASE : int = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _SCREAMING_SNAKE_CASE : Optional[Any] = leaf[1:-2] # for categorical special tokens output[key].append(snake_case__ ) if len(output[key] ) == 1: _SCREAMING_SNAKE_CASE : Dict = output[key][0] _SCREAMING_SNAKE_CASE : List[Any] = 
tokens[tokens.find(snake_case__ ) + len(snake_case__ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=snake_case__ , added_vocab=snake_case__ ) if len(snake_case__ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case__ , ) return self.image_processor_class @property def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case__ , ) return self.image_processor
295
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase_ = { "configuration_efficientnet": [ "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientNetConfig", "EfficientNetOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["EfficientNetImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientNetForImageClassification", "EfficientNetModel", "EfficientNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, 
EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
28
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ): '''simple docstring''' lowerCAmelCase__ : Dict = (PNDMScheduler,) lowerCAmelCase__ : Tuple = (("num_inference_steps", 50),) def _lowerCamelCase ( self : Tuple ,**UpperCamelCase : Any ) -> Dict: _lowercase : Tuple = { 'num_train_timesteps': 1000, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', } config.update(**UpperCamelCase ) return config def _lowerCamelCase ( self : Optional[int] ,UpperCamelCase : Dict=0 ,**UpperCamelCase : List[str] ) -> Any: _lowercase : Optional[Any] = dict(self.forward_default_kwargs ) _lowercase : Tuple = kwargs.pop('num_inference_steps' ,UpperCamelCase ) _lowercase : Tuple = self.dummy_sample _lowercase : Optional[int] = 0.1 * sample _lowercase : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _lowercase : str = self.get_scheduler_config(**UpperCamelCase ) _lowercase : List[Any] = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) # copy over dummy past residuals _lowercase : str = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase ) _lowercase : List[str] = scheduler_class.from_pretrained(UpperCamelCase ) new_scheduler.set_timesteps(UpperCamelCase ) # copy over dummy past residuals _lowercase : Any = dummy_past_residuals[:] _lowercase : Dict = scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample _lowercase : Optional[Any] = new_scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" _lowercase : Union[str, Any] = scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase 
,**UpperCamelCase ).prev_sample _lowercase : Optional[Any] = new_scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowerCamelCase ( self : str ) -> List[Any]: pass def _lowerCamelCase ( self : Dict ,UpperCamelCase : List[Any]=0 ,**UpperCamelCase : List[Any] ) -> List[Any]: _lowercase : int = dict(self.forward_default_kwargs ) _lowercase : Any = kwargs.pop('num_inference_steps' ,UpperCamelCase ) _lowercase : List[str] = self.dummy_sample _lowercase : Dict = 0.1 * sample _lowercase : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _lowercase : int = self.get_scheduler_config() _lowercase : Union[str, Any] = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _lowercase : int = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase ) _lowercase : Union[str, Any] = scheduler_class.from_pretrained(UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _lowercase : Any = dummy_past_residuals[:] _lowercase : Dict = scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample _lowercase : List[Any] = new_scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" _lowercase : int = scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample _lowercase : Tuple = new_scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - 
new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowerCamelCase ( self : Optional[int] ,**UpperCamelCase : Any ) -> List[Any]: _lowercase : Dict = self.scheduler_classes[0] _lowercase : Union[str, Any] = self.get_scheduler_config(**UpperCamelCase ) _lowercase : Optional[int] = scheduler_class(**UpperCamelCase ) _lowercase : Dict = 10 _lowercase : str = self.dummy_model() _lowercase : Tuple = self.dummy_sample_deter scheduler.set_timesteps(UpperCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): _lowercase : Any = model(UpperCamelCase ,UpperCamelCase ) _lowercase : Union[str, Any] = scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): _lowercase : Optional[Any] = model(UpperCamelCase ,UpperCamelCase ) _lowercase : Tuple = scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ).prev_sample return sample def _lowerCamelCase ( self : Optional[Any] ) -> List[str]: _lowercase : Union[str, Any] = dict(self.forward_default_kwargs ) _lowercase : List[str] = kwargs.pop('num_inference_steps' ,UpperCamelCase ) for scheduler_class in self.scheduler_classes: _lowercase : str = self.get_scheduler_config() _lowercase : Dict = scheduler_class(**UpperCamelCase ) _lowercase : int = self.dummy_sample _lowercase : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCamelCase ,'set_timesteps' ): scheduler.set_timesteps(UpperCamelCase ) elif num_inference_steps is not None and not hasattr(UpperCamelCase ,'set_timesteps' ): _lowercase : List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _lowercase : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] _lowercase : List[Any] = dummy_past_residuals[:] _lowercase : List[str] = scheduler.step_prk(UpperCamelCase ,0 ,UpperCamelCase ,**UpperCamelCase ).prev_sample _lowercase : List[str] = scheduler.step_prk(UpperCamelCase ,1 
,UpperCamelCase ,**UpperCamelCase ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) _lowercase : str = scheduler.step_plms(UpperCamelCase ,0 ,UpperCamelCase ,**UpperCamelCase ).prev_sample _lowercase : Any = scheduler.step_plms(UpperCamelCase ,1 ,UpperCamelCase ,**UpperCamelCase ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def _lowerCamelCase ( self : Optional[int] ) -> List[str]: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def _lowerCamelCase ( self : Union[str, Any] ) -> int: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=UpperCamelCase ) _lowercase : Tuple = self.scheduler_classes[0] _lowercase : List[str] = self.get_scheduler_config(steps_offset=1 ) _lowercase : Any = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps ,torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,) def _lowerCamelCase ( self : Any ) -> Optional[int]: for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=UpperCamelCase ,beta_end=UpperCamelCase ) def _lowerCamelCase ( self : Any ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase ) def _lowerCamelCase ( self : List[Any] ) -> List[str]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase ) def _lowerCamelCase ( self : Optional[Any] ) -> Any: for t in [1, 5, 10]: self.check_over_forward(time_step=UpperCamelCase ) def _lowerCamelCase ( self : Dict ) -> Union[str, Any]: for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCamelCase ) def _lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]: # 
earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 _lowercase : Dict = 27 for scheduler_class in self.scheduler_classes: _lowercase : List[Any] = self.dummy_sample _lowercase : List[str] = 0.1 * sample _lowercase : Union[str, Any] = self.get_scheduler_config() _lowercase : Any = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): _lowercase : Optional[int] = scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ).prev_sample def _lowerCamelCase ( self : Dict ) -> Dict: with self.assertRaises(UpperCamelCase ): _lowercase : Optional[int] = self.scheduler_classes[0] _lowercase : str = self.get_scheduler_config() _lowercase : Tuple = scheduler_class(**UpperCamelCase ) scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample def _lowerCamelCase ( self : Optional[int] ) -> int: _lowercase : Any = self.full_loop() _lowercase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase ) ) _lowercase : Any = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2 assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3 def _lowerCamelCase ( self : Any ) -> Union[str, Any]: _lowercase : Tuple = self.full_loop(prediction_type='v_prediction' ) _lowercase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase ) ) _lowercase : int = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2 assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3 def _lowerCamelCase ( self : List[Any] ) -> str: # We specify different beta, so that the first alpha is 0.99 _lowercase : List[Any] = self.full_loop(set_alpha_to_one=UpperCamelCase ,beta_start=0.0_1 ) _lowercase : List[Any] = torch.sum(torch.abs(UpperCamelCase ) ) _lowercase : Any = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 
2_3_0.0_3_9_9 ) < 1e-2 assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3 def _lowerCamelCase ( self : Any ) -> Optional[int]: # We specify different beta, so that the first alpha is 0.99 _lowercase : Union[str, Any] = self.full_loop(set_alpha_to_one=UpperCamelCase ,beta_start=0.0_1 ) _lowercase : List[Any] = torch.sum(torch.abs(UpperCamelCase ) ) _lowercase : List[Any] = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2 assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
125
0
import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class __SCREAMING_SNAKE_CASE ( __lowercase): _SCREAMING_SNAKE_CASE : Optional[int] = ComputeEnvironment.AMAZON_SAGEMAKER _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Union[str, Any] = '''ml.p3.2xlarge''' _SCREAMING_SNAKE_CASE : str = '''accelerate_sagemaker_execution_role''' _SCREAMING_SNAKE_CASE : Optional[int] = '''hf-sm''' _SCREAMING_SNAKE_CASE : Dict = '''us-east-1''' _SCREAMING_SNAKE_CASE : Optional[Any] = 1 _SCREAMING_SNAKE_CASE : List[Any] = '''accelerate-sagemaker-1''' _SCREAMING_SNAKE_CASE : int = '''1.6''' _SCREAMING_SNAKE_CASE : Tuple = '''4.4''' _SCREAMING_SNAKE_CASE : List[str] = '''train.py''' _SCREAMING_SNAKE_CASE : str = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''False''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] _SCREAMING_SNAKE_CASE : Optional[Any] = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''--do_test''', '''False''', '''--do_predict''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCamelCase__ ( self ): """simple docstring""" # If no defaults are changed, `to_kwargs` returns an empty dict. 
lowerCAmelCase__ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args['model_name_or_path'] , _UpperCamelCase ) assert isinstance(converted_args['do_train'] , _UpperCamelCase ) assert isinstance(converted_args['epochs'] , _UpperCamelCase ) assert isinstance(converted_args['learning_rate'] , _UpperCamelCase ) assert isinstance(converted_args['max_steps'] , _UpperCamelCase ) with pytest.raises(_UpperCamelCase ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
365
from typing import Dict, Optional import numpy as np import datasets __snake_case : str = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ __snake_case : Tuple = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. 
Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ __snake_case : Any = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : bool , UpperCamelCase_ : Optional[Dict[int, int]] = None , UpperCamelCase_ : bool = False , ) -> List[Any]: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): lowerCAmelCase__ = new_id # turn into Numpy arrays lowerCAmelCase__ = np.array(UpperCamelCase_ ) lowerCAmelCase__ = np.array(UpperCamelCase_ ) if reduce_labels: lowerCAmelCase__ = 255 lowerCAmelCase__ = label - 1 lowerCAmelCase__ = 255 lowerCAmelCase__ = label != ignore_index lowerCAmelCase__ = np.not_equal(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = pred_label[mask] lowerCAmelCase__ = np.array(UpperCamelCase_ )[mask] lowerCAmelCase__ = pred_label[pred_label == label] lowerCAmelCase__ = np.histogram(UpperCamelCase_ , bins=UpperCamelCase_ , range=(0, num_labels - 1) )[0] lowerCAmelCase__ = np.histogram(UpperCamelCase_ , bins=UpperCamelCase_ , range=(0, num_labels - 1) )[0] lowerCAmelCase__ = np.histogram(UpperCamelCase_ , bins=UpperCamelCase_ , range=(0, num_labels - 1) )[0] lowerCAmelCase__ = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def _UpperCamelCase ( UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : bool , UpperCamelCase_ : Optional[Dict[int, int]] = None , UpperCamelCase_ : bool = False , ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ = np.zeros((num_labels,) , dtype=np.floataa ) lowerCAmelCase__ = np.zeros((num_labels,) , dtype=np.floataa ) lowerCAmelCase__ = np.zeros((num_labels,) , 
dtype=np.floataa ) lowerCAmelCase__ = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = intersect_and_union( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def _UpperCamelCase ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : bool , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Dict[int, int]] = None , UpperCamelCase_ : bool = False , ) -> List[str]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = total_intersect_and_union( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # compute metrics lowerCAmelCase__ = {} lowerCAmelCase__ = total_area_intersect.sum() / total_area_label.sum() lowerCAmelCase__ = total_area_intersect / total_area_union lowerCAmelCase__ = total_area_intersect / total_area_label lowerCAmelCase__ = np.nanmean(UpperCamelCase_ ) lowerCAmelCase__ = np.nanmean(UpperCamelCase_ ) lowerCAmelCase__ = all_acc lowerCAmelCase__ = iou lowerCAmelCase__ = acc if nan_to_num is not None: lowerCAmelCase__ = {metric: np.nan_to_num(UpperCamelCase_ , nan=UpperCamelCase_ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __SCREAMING_SNAKE_CASE ( datasets.Metric): def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st 
Seq - height dim, 2nd - width dim { 'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), 'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), } ) , reference_urls=[ 'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py' ] , ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , ): """simple docstring""" lowerCAmelCase__ = mean_iou( results=_UpperCamelCase , gt_seg_maps=_UpperCamelCase , num_labels=_UpperCamelCase , ignore_index=_UpperCamelCase , nan_to_num=_UpperCamelCase , label_map=_UpperCamelCase , reduce_labels=_UpperCamelCase , ) return iou_result
365
1
'''simple docstring''' def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ): assert column_title.isupper() __a : str = 0 __a : List[Any] = len(_lowercase ) - 1 __a : List[str] = 0 while index >= 0: __a : Optional[Any] = (ord(column_title[index] ) - 64) * pow(26 , _lowercase ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
597
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class a : """simple docstring""" def __init__( self , snake_case_ , ): '''simple docstring''' __UpperCAmelCase: List[Any] = parent __UpperCAmelCase: Dict = 13 __UpperCAmelCase: Optional[int] = 7 __UpperCAmelCase: List[str] = 30 __UpperCAmelCase: List[Any] = self.seq_length + self.mem_len __UpperCAmelCase: int = 15 __UpperCAmelCase: Optional[int] = True __UpperCAmelCase: List[str] = True __UpperCAmelCase: Union[str, Any] = 99 __UpperCAmelCase: Optional[int] = [10, 50, 80] __UpperCAmelCase: str = 32 __UpperCAmelCase: Optional[Any] = 32 __UpperCAmelCase: Union[str, Any] = 4 __UpperCAmelCase: int = 8 __UpperCAmelCase: str = 128 __UpperCAmelCase: str = 2 __UpperCAmelCase: Tuple = 2 __UpperCAmelCase: Union[str, Any] = None __UpperCAmelCase: str = 1 __UpperCAmelCase: Optional[Any] = 0 __UpperCAmelCase: int = 3 __UpperCAmelCase: Dict = self.vocab_size - 1 __UpperCAmelCase: int = 0.0_1 def lowercase_ ( self ): '''simple docstring''' __UpperCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase: List[str] = None if self.use_labels: __UpperCAmelCase: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase: Optional[int] = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , 
cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowercase_ ( self ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' __UpperCAmelCase: Dict = TFTransfoXLModel(snake_case_ ) __UpperCAmelCase, __UpperCAmelCase: List[str] = model(snake_case_ ).to_tuple() __UpperCAmelCase: Tuple = {"""input_ids""": input_ids_a, """mems""": mems_a} __UpperCAmelCase, __UpperCAmelCase: Optional[Any] = model(snake_case_ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' __UpperCAmelCase: str = TFTransfoXLLMHeadModel(snake_case_ ) __UpperCAmelCase, __UpperCAmelCase: Optional[int] = model(snake_case_ ).to_tuple() __UpperCAmelCase: Optional[Any] = {"""input_ids""": input_ids_a, """labels""": lm_labels} __UpperCAmelCase, __UpperCAmelCase: Tuple = model(snake_case_ ).to_tuple() __UpperCAmelCase, __UpperCAmelCase: Dict = model([input_ids_a, mems_a] ).to_tuple() __UpperCAmelCase: Union[str, Any] = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} 
__UpperCAmelCase, __UpperCAmelCase: List[str] = model(snake_case_ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' __UpperCAmelCase: Optional[int] = TFTransfoXLForSequenceClassification(snake_case_ ) __UpperCAmelCase: List[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self ): '''simple docstring''' __UpperCAmelCase: Optional[int] = self.prepare_config_and_inputs() ((__UpperCAmelCase), (__UpperCAmelCase), (__UpperCAmelCase), (__UpperCAmelCase)): Dict = config_and_inputs __UpperCAmelCase: List[str] = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): """simple docstring""" __lowerCAmelCase = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) __lowerCAmelCase = () if is_tf_available() else () __lowerCAmelCase = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ , 
snake_case_ , snake_case_ ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def lowercase_ ( self ): '''simple docstring''' __UpperCAmelCase: Dict = TFTransfoXLModelTester(self ) __UpperCAmelCase: Any = ConfigTester(self , config_class=snake_case_ , d_embed=37 ) def lowercase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ): '''simple docstring''' self.model_tester.set_seed() __UpperCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*snake_case_ ) def lowercase_ ( self ): '''simple docstring''' self.model_tester.set_seed() __UpperCAmelCase: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*snake_case_ ) def lowercase_ ( self ): '''simple docstring''' __UpperCAmelCase: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*snake_case_ ) def lowercase_ ( self ): '''simple docstring''' __UpperCAmelCase, __UpperCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase: str = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: __UpperCAmelCase: int = model_class(snake_case_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: __UpperCAmelCase: Any = model.get_output_embeddings() assert isinstance(snake_case_ , tf.keras.layers.Layer ) __UpperCAmelCase: int = model.get_bias() assert name is None else: __UpperCAmelCase: Optional[int] = model.get_output_embeddings() assert x is None __UpperCAmelCase: str = model.get_bias() 
assert name is None def lowercase_ ( self ): '''simple docstring''' pass @slow def lowercase_ ( self ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase: str = TFTransfoXLModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def lowercase_ ( self ): '''simple docstring''' pass @require_tf class a ( unittest.TestCase ): """simple docstring""" @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def lowercase_ ( self ): '''simple docstring''' __UpperCAmelCase: Optional[Any] = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off __UpperCAmelCase: str = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . 
Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off __UpperCAmelCase: Dict = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> __UpperCAmelCase: Dict = model.generate(snake_case_ , max_length=200 , do_sample=snake_case_ ) self.assertListEqual(output_ids[0].numpy().tolist() , snake_case_ )
523
0
"""simple docstring""" # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib lowercase__ = get_logger() lowercase__ = None class __snake_case ( TensorFormatter[Mapping, """jax.Array""", Mapping] ): def __init__( self , lowercase=None , lowercase=None , **lowercase) -> Optional[Any]: '''simple docstring''' super().__init__(features=lowercase) import jax from jaxlib.xla_client import Device if isinstance(lowercase , lowercase): raise ValueError( f'Expected {device} to be a `str` not {type(lowercase)}, as `jaxlib.xla_extension.Device` ' 'is not serializable neither with `pickle` nor with `dill`. Instead you can surround ' 'the device with `str()` to get its string identifier that will be internally mapped ' 'to the actual `jaxlib.xla_extension.Device`.') a__: int = device if isinstance(lowercase , lowercase) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: a__: Any = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( f'Device with string identifier {self.device} not listed among the available ' f'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default ' f'device: {str(jax.devices()[0])}.') a__: Any = str(jax.devices()[0]) a__: Any = jnp_array_kwargs @staticmethod def lowerCamelCase_ ( ) -> Dict[str, "jaxlib.xla_extension.Device"]: '''simple docstring''' import jax return {str(lowercase): device for device in jax.devices()} def lowerCamelCase_ ( self , lowercase) -> List[str]: '''simple docstring''' import jax import jax.numpy as jnp if 
isinstance(lowercase , lowercase) and column: if all( isinstance(lowercase , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return jnp.stack(lowercase , axis=0) return column def lowerCamelCase_ ( self , lowercase) -> List[Any]: '''simple docstring''' import jax import jax.numpy as jnp if isinstance(lowercase , (str, bytes, type(lowercase))): return value elif isinstance(lowercase , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() a__: Optional[int] = {} if isinstance(lowercase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: a__: List[Any] = {'dtype': jnp.intaa} else: a__: str = {'dtype': jnp.intaa} elif isinstance(lowercase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): a__: List[str] = {'dtype': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowercase , PIL.Image.Image): a__: Tuple = np.asarray(lowercase) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: a__: Dict = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(lowercase , **{**default_dtype, **self.jnp_array_kwargs}) def lowerCamelCase_ ( self , lowercase) -> Dict: '''simple docstring''' import jax # support for torch, tf, jax etc. 
if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(lowercase , torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(lowercase , '__array__') and not isinstance(lowercase , jax.Array): a__: str = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowercase , np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowercase) for substruct in data_struct]) elif isinstance(lowercase , (list, tuple)): return self._consolidate([self.recursive_tensorize(lowercase) for substruct in data_struct]) return self._tensorize(lowercase) def lowerCamelCase_ ( self , lowercase) -> List[str]: '''simple docstring''' return map_nested(self._recursive_tensorize , lowercase , map_list=lowercase) def lowerCamelCase_ ( self , lowercase) -> Mapping: '''simple docstring''' a__: Union[str, Any] = self.numpy_arrow_extractor().extract_row(lowercase) a__: Tuple = self.python_features_decoder.decode_row(lowercase) return self.recursive_tensorize(lowercase) def lowerCamelCase_ ( self , lowercase) -> "jax.Array": '''simple docstring''' a__: Any = self.numpy_arrow_extractor().extract_column(lowercase) a__: Union[str, Any] = self.python_features_decoder.decode_column(lowercase , pa_table.column_names[0]) a__: str = self.recursive_tensorize(lowercase) a__: Tuple = self._consolidate(lowercase) return column def lowerCamelCase_ ( self , lowercase) -> Mapping: '''simple docstring''' a__: Any = self.numpy_arrow_extractor().extract_batch(lowercase) a__: str = self.python_features_decoder.decode_batch(lowercase) a__: List[str] = self.recursive_tensorize(lowercase) for column_name in batch: a__: int = self._consolidate(batch[column_name]) return batch
217
"""simple docstring""" import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __snake_case : def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=64 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Optional[Any]: '''simple docstring''' a__: int = parent a__: Union[str, Any] = batch_size a__: Optional[int] = seq_length a__: int = is_training a__: Optional[Any] = use_input_mask a__: List[Any] = use_token_type_ids a__: List[str] = use_labels a__: Dict = vocab_size a__: Tuple = hidden_size a__: Optional[Any] = embedding_size a__: Optional[int] = num_hidden_layers a__: Optional[int] = num_attention_heads a__: Optional[int] = intermediate_size a__: Dict = hidden_act a__: List[str] = hidden_dropout_prob a__: str = attention_probs_dropout_prob a__: List[str] = max_position_embeddings a__: str = type_vocab_size a__: Tuple = type_sequence_label_size a__: List[Any] = initializer_range a__: Optional[Any] = num_labels a__: 
Optional[int] = num_choices a__: int = scope def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a__: Union[str, Any] = None if self.use_input_mask: a__: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length]) a__: Optional[Any] = None if self.use_token_type_ids: a__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a__: List[Any] = None a__: Optional[int] = None a__: Optional[Any] = None if self.use_labels: a__: List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a__: Optional[Any] = ids_tensor([self.batch_size] , self.num_choices) a__: Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__: List[str] = MegatronBertModel(config=lowercase) model.to(lowercase) model.eval() a__: List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase) a__: Any = model(lowercase , token_type_ids=lowercase) a__: List[Any] = model(lowercase) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[str]: '''simple docstring''' a__: List[str] = MegatronBertForMaskedLM(config=lowercase) model.to(lowercase) model.eval() a__: Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> int: '''simple docstring''' a__: Dict = MegatronBertForCausalLM(config=lowercase) model.to(lowercase) model.eval() a__: List[str] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Dict: '''simple docstring''' a__: Union[str, Any] = MegatronBertForNextSentencePrediction(config=lowercase) model.to(lowercase) model.eval() a__: str = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__: int = MegatronBertForPreTraining(config=lowercase) model.to(lowercase) model.eval() a__: Dict = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, 
self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> int: '''simple docstring''' a__: Dict = MegatronBertForQuestionAnswering(config=lowercase) model.to(lowercase) model.eval() a__: Any = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__: Tuple = self.num_labels a__: Union[str, Any] = MegatronBertForSequenceClassification(lowercase) model.to(lowercase) model.eval() a__: int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Dict: '''simple docstring''' a__: int = self.num_labels a__: Optional[Any] = MegatronBertForTokenClassification(config=lowercase) model.to(lowercase) model.eval() a__: Tuple = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> str: '''simple docstring''' a__: Dict = self.num_choices a__: Any = MegatronBertForMultipleChoice(config=lowercase) model.to(lowercase) model.eval() a__: List[str] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__: Dict = 
token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__: List[str] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__: List[Any] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def lowerCamelCase_ ( self) -> Dict: '''simple docstring''' a__: Optional[Any] = self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ): Tuple = config_and_inputs a__: Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): a__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) a__ = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) a__ = True # test_resize_embeddings = False a__ = False def lowerCamelCase_ ( self , lowercase , lowercase , lowercase=False) -> Optional[int]: '''simple docstring''' a__: List[Any] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase) if return_labels: if model_class in get_values(lowercase): a__: Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase) 
a__: List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase) return inputs_dict def lowerCamelCase_ ( self) -> int: '''simple docstring''' a__: Dict = MegatronBertModelTester(self) a__: Dict = ConfigTester(self , config_class=lowercase , hidden_size=37) def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' a__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*lowercase) def lowerCamelCase_ ( self) -> int: '''simple docstring''' a__: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowercase) def lowerCamelCase_ ( self) -> Dict: '''simple docstring''' a__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowercase) def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowercase) def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowercase) def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowercase) def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowercase) def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: int = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowercase) def __a ( _SCREAMING_SNAKE_CASE ) ->Any: return torch.tensor( _SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , ) lowercase__ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): @slow @unittest.skip('Model is not available.') def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__: Dict = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: a__: List[str] = os.path.join(os.environ['MYDIR'] , lowercase) a__: Tuple = MegatronBertModel.from_pretrained(lowercase) model.to(lowercase) model.half() a__: Any = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]]) with torch.no_grad(): a__: str = model(lowercase)[0] a__: Tuple = torch.Size((1, 9, 10_24)) self.assertEqual(output.shape , lowercase) a__: Optional[Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3): for jj in range(3): a__: List[Any] = output[0, ii, jj] a__: Dict = expected[3 * ii + jj] a__: str = 'ii={} jj={} a={} b={}'.format(lowercase , lowercase , lowercase , lowercase) self.assertTrue(math.isclose(lowercase , lowercase , rel_tol=lowercase , abs_tol=lowercase) , msg=lowercase)
217
1
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCamelCase ( _snake_case : Union[str, Any] ,_snake_case : Dict ,_snake_case : Any ,_snake_case : Optional[Any] ,_snake_case : List[Any] ): '''simple docstring''' with open(_snake_case ) as metadata_file: lowercase__ = json.load(_snake_case ) lowercase__ = LukeConfig(use_entity_aware_attention=_snake_case ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path lowercase__ = torch.load(_snake_case ,map_location="cpu" ) # Load the entity vocab file lowercase__ = load_entity_vocab(_snake_case ) lowercase__ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks lowercase__ = AddedToken("<ent>" ,lstrip=_snake_case ,rstrip=_snake_case ) lowercase__ = AddedToken("<ent2>" ,lstrip=_snake_case ,rstrip=_snake_case ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(_snake_case ) with open(os.path.join(_snake_case ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(_snake_case ,_snake_case ) lowercase__ = LukeTokenizer.from_pretrained(_snake_case ) # Initialize the embeddings of the special tokens lowercase__ = state_dict["embeddings.word_embeddings.weight"] lowercase__ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) lowercase__ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: 
lowercase__ = f'''encoder.layer.{layer_index}.attention.self.''' lowercase__ = state_dict[prefix + matrix_name] lowercase__ = state_dict[prefix + matrix_name] lowercase__ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks lowercase__ = state_dict["entity_embeddings.entity_embeddings.weight"] lowercase__ = entity_emb[entity_vocab["[MASK]"]] lowercase__ = LukeModel(config=_snake_case ).eval() lowercase__ , lowercase__ = model.load_state_dict(_snake_case ,strict=_snake_case ) if not (len(_snake_case ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f'''Missing keys {', '.join(_snake_case )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" f''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' ) # Check outputs lowercase__ = LukeTokenizer.from_pretrained(_snake_case ,task="entity_classification" ) lowercase__ = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." 
) lowercase__ = (39, 42) lowercase__ = tokenizer(_snake_case ,entity_spans=[span] ,add_prefix_space=_snake_case ,return_tensors="pt" ) lowercase__ = model(**_snake_case ) # Verify word hidden states if model_size == "large": lowercase__ = torch.Size((1, 42, 1_024) ) lowercase__ = torch.tensor( [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] ) else: # base lowercase__ = torch.Size((1, 42, 768) ) lowercase__ = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_snake_case ,atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": lowercase__ = torch.Size((1, 1, 1_024) ) lowercase__ = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] ) else: # base lowercase__ = torch.Size((1, 1, 768) ) lowercase__ = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,_snake_case ,atol=1e-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(_snake_case ) ) model.save_pretrained(_snake_case ) def lowerCamelCase ( _snake_case : Union[str, Any] ): '''simple docstring''' lowercase__ = {} with open(_snake_case ,"r" ,encoding="utf-8" ) as f: for index, line in enumerate(_snake_case ): lowercase__ , lowercase__ = line.rstrip().split("\t" ) lowercase__ = index return entity_vocab if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters 
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
267
'''simple docstring''' def lowerCamelCase ( _snake_case : int = 50_000_000 ): '''simple docstring''' lowercase__ = set() lowercase__ = int((limit - 24) ** (1 / 2) ) lowercase__ = set(range(3 ,prime_square_limit + 1 ,2 ) ) primes.add(2 ) for p in range(3 ,prime_square_limit + 1 ,2 ): if p not in primes: continue primes.difference_update(set(range(p * p ,prime_square_limit + 1 ,_snake_case ) ) ) for primea in primes: lowercase__ = primea * primea for primea in primes: lowercase__ = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowercase__ = primea * primea * primea * primea lowercase__ = square + cube + tetr if total >= limit: break ret.add(_snake_case ) return len(_snake_case ) if __name__ == "__main__": print(f'''{solution() = }''')
267
1
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __A : str = logging.get_logger(__name__) __A : str = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[str] , a : Tuple=None , **a : Any ) -> List[str]: logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" ) SCREAMING_SNAKE_CASE = model SCREAMING_SNAKE_CASE = kwargs.get("""model_save_dir""" , a ) SCREAMING_SNAKE_CASE = kwargs.get("""latest_model_name""" , a ) def __call__( self : List[str] , **a : Any ) -> List[Any]: SCREAMING_SNAKE_CASE = {k: np.array(a ) for k, v in kwargs.items()} return self.model.run(a , a ) @staticmethod def _UpperCAmelCase ( a : Union[str, Path] , a : Any=None , a : Optional[int]=None ) -> Optional[Any]: if provider is None: logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" ) SCREAMING_SNAKE_CASE = """CPUExecutionProvider""" return ort.InferenceSession(a , providers=[provider] , sess_options=a ) def _UpperCAmelCase ( self : str , a : Union[str, Path] , a : Optional[str] = None , **a : str ) -> Tuple: SCREAMING_SNAKE_CASE = file_name if file_name is not None else ONNX_WEIGHTS_NAME SCREAMING_SNAKE_CASE = self.model_save_dir.joinpath(self.latest_model_name ) SCREAMING_SNAKE_CASE = Path(a ).joinpath(a ) try: shutil.copyfile(a , a ) except shutil.SameFileError: pass # copy external 
weights (for models >2GB) SCREAMING_SNAKE_CASE = self.model_save_dir.joinpath(a ) if src_path.exists(): SCREAMING_SNAKE_CASE = Path(a ).joinpath(a ) try: shutil.copyfile(a , a ) except shutil.SameFileError: pass def _UpperCAmelCase ( self : Dict , a : Union[str, os.PathLike] , **a : Tuple , ) -> str: if os.path.isfile(a ): logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(a , exist_ok=a ) # saving model weights/files self._save_pretrained(a , **a ) @classmethod def _UpperCAmelCase ( cls : List[Any] , a : Union[str, Path] , a : Optional[Union[bool, str, None]] = None , a : Optional[Union[str, None]] = None , a : bool = False , a : Optional[str] = None , a : Optional[str] = None , a : Optional[str] = None , a : Optional["ort.SessionOptions"] = None , **a : Union[str, Any] , ) -> List[str]: SCREAMING_SNAKE_CASE = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(a ): SCREAMING_SNAKE_CASE = OnnxRuntimeModel.load_model( os.path.join(a , a ) , provider=a , sess_options=a ) SCREAMING_SNAKE_CASE = Path(a ) # load model from hub else: # download model SCREAMING_SNAKE_CASE = hf_hub_download( repo_id=a , filename=a , use_auth_token=a , revision=a , cache_dir=a , force_download=a , ) SCREAMING_SNAKE_CASE = Path(a ).parent SCREAMING_SNAKE_CASE = Path(a ).name SCREAMING_SNAKE_CASE = OnnxRuntimeModel.load_model(a , provider=a , sess_options=a ) return cls(model=a , **a ) @classmethod def _UpperCAmelCase ( cls : List[Any] , a : Union[str, Path] , a : bool = True , a : Optional[str] = None , a : Optional[str] = None , **a : Union[str, Any] , ) -> Any: SCREAMING_SNAKE_CASE = None if len(str(a ).split("""@""" ) ) == 2: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model_id.split("""@""" ) return cls._from_pretrained( model_id=a , revision=a , cache_dir=a , force_download=a , use_auth_token=a , **a , )
707
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument( """--original_config_file""", type=str, required=True, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--image_size""", default=5_1_2, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. 
This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' if string == "True": return True elif string == "False": return False else: raise ValueError(f"""could not parse string as bool {string}""" ) parser.add_argument( """--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool ) parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int) __A : Union[str, Any] = parser.parse_args() __A : Any = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
450
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowercase__ ( A_ ): __UpperCAmelCase = '''rwkv''' __UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''} def __init__( self , SCREAMING_SNAKE_CASE=5_0277 , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = context_length _lowerCamelCase : str = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size 
_lowerCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size _lowerCamelCase : int = layer_norm_epsilon _lowerCamelCase : int = rescale_every _lowerCamelCase : Optional[int] = use_cache _lowerCamelCase : Dict = bos_token_id _lowerCamelCase : List[Any] = eos_token_id super().__init__( tie_word_embeddings=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
from __future__ import annotations class lowerCAmelCase : '''simple docstring''' def __init__( self : str , __a : Dict=None ) -> int: """simple docstring""" __lowercase : int = data __lowercase : Optional[int] = None def __repr__( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase : Any = [] __lowercase : Dict = self while temp: string_rep.append(F"{temp.data}" ) __lowercase : Union[str, Any] = temp.next return "->".join(__a ) def snake_case_ ( lowerCAmelCase_ : list ): if not elements_list: raise Exception("""The Elements List is empty""" ) __lowercase : List[str] = Node(elements_list[0] ) for i in range(1 , len(lowerCAmelCase_ ) ): __lowercase : int = Node(elements_list[i] ) __lowercase : List[str] = current.next return head def snake_case_ ( lowerCAmelCase_ : Node ): if head_node is not None and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): print_reverse(head_node.next ) print(head_node.data ) def snake_case_ ( ): from doctest import testmod testmod() __lowercase : List[Any] = make_linked_list([14, 52, 14, 12, 43] ) print("""Linked List:""" ) print(lowerCAmelCase_ ) print("""Elements in Reverse:""" ) print_reverse(lowerCAmelCase_ ) if __name__ == "__main__": main()
149
0
'''simple docstring''' from __future__ import annotations def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' return [ord(_lowerCamelCase ) - 96 for elem in plain] def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :List[str] = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''' , _lowerCamelCase ) print('''Decoded:''' , decode(_lowerCamelCase ) ) if __name__ == "__main__": main()
719
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Dict = DPTConfig(embedding_type='''hybrid''' ) if "large" in checkpoint_url: __UpperCamelCase :Dict = 1_024 __UpperCamelCase :Optional[int] = 4_096 __UpperCamelCase :int = 24 __UpperCamelCase :List[str] = 16 __UpperCamelCase :List[str] = [5, 11, 17, 23] __UpperCamelCase :Optional[int] = [256, 512, 1_024, 1_024] __UpperCamelCase :str = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: __UpperCamelCase :str = 768 __UpperCamelCase :Dict = [1, 1, 1, 0.5] __UpperCamelCase :List[str] = [256, 512, 768, 768] __UpperCamelCase :int = 150 __UpperCamelCase :Union[str, Any] = 16 __UpperCamelCase :str = (1, 384, 384) __UpperCamelCase :List[str] = False __UpperCamelCase :List[Any] = '''project''' if "ade" in checkpoint_url: __UpperCamelCase :List[Any] = True __UpperCamelCase :Optional[Any] = 768 __UpperCamelCase :List[Any] = [1, 1, 1, 0.5] __UpperCamelCase :List[str] = 150 __UpperCamelCase :Optional[int] = 16 __UpperCamelCase :Optional[int] = '''huggingface/label-files''' __UpperCamelCase :Dict = '''ade20k-id2label.json''' __UpperCamelCase :int = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) ) , '''r''' ) ) __UpperCamelCase :Any = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __UpperCamelCase :Tuple = idalabel __UpperCamelCase :Dict = {v: k for k, v in idalabel.items()} __UpperCamelCase :Optional[int] = [1, 150, 480, 480] return config, expected_shape def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase 
:Optional[int] = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): __UpperCamelCase :Dict = name.replace('''pretrained.model''' , '''dpt.encoder''' ) if "pretrained.model" in name: __UpperCamelCase :Optional[int] = name.replace('''pretrained.model''' , '''dpt.embeddings''' ) if "patch_embed" in name: __UpperCamelCase :Any = name.replace('''patch_embed''' , '''''' ) if "pos_embed" in name: __UpperCamelCase :Dict = name.replace('''pos_embed''' , '''position_embeddings''' ) if "attn.proj" in name: __UpperCamelCase :List[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "proj" in name and "project" not in name: __UpperCamelCase :Any = name.replace('''proj''' , '''projection''' ) if "blocks" in name: __UpperCamelCase :List[Any] = name.replace('''blocks''' , '''layer''' ) if "mlp.fc1" in name: __UpperCamelCase :int = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __UpperCamelCase :List[str] = name.replace('''mlp.fc2''' , '''output.dense''' ) if "norm1" in name and "backbone" not in name: __UpperCamelCase :Dict = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name and "backbone" not in name: __UpperCamelCase :Optional[int] = name.replace('''norm2''' , '''layernorm_after''' ) if "scratch.output_conv" in name: __UpperCamelCase :List[str] = name.replace('''scratch.output_conv''' , '''head''' ) if "scratch" in name: __UpperCamelCase :Optional[int] = name.replace('''scratch''' , '''neck''' ) if "layer1_rn" in name: __UpperCamelCase :int = name.replace('''layer1_rn''' , '''convs.0''' ) if "layer2_rn" in name: __UpperCamelCase :Dict = name.replace('''layer2_rn''' , '''convs.1''' ) if "layer3_rn" in name: __UpperCamelCase 
:Dict = name.replace('''layer3_rn''' , '''convs.2''' ) if "layer4_rn" in name: __UpperCamelCase :int = name.replace('''layer4_rn''' , '''convs.3''' ) if "refinenet" in name: __UpperCamelCase :Tuple = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 __UpperCamelCase :Dict = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" ) if "out_conv" in name: __UpperCamelCase :Union[str, Any] = name.replace('''out_conv''' , '''projection''' ) if "resConfUnit1" in name: __UpperCamelCase :int = name.replace('''resConfUnit1''' , '''residual_layer1''' ) if "resConfUnit2" in name: __UpperCamelCase :str = name.replace('''resConfUnit2''' , '''residual_layer2''' ) if "conv1" in name: __UpperCamelCase :Any = name.replace('''conv1''' , '''convolution1''' ) if "conv2" in name: __UpperCamelCase :Optional[int] = name.replace('''conv2''' , '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: __UpperCamelCase :Union[str, Any] = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: __UpperCamelCase :Tuple = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: __UpperCamelCase :Any = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: __UpperCamelCase :Tuple = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: __UpperCamelCase :Tuple = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: 
__UpperCamelCase :Optional[int] = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: __UpperCamelCase :Tuple = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: __UpperCamelCase :Dict = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: __UpperCamelCase :Dict = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: __UpperCamelCase :Optional[int] = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: __UpperCamelCase :List[str] = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: __UpperCamelCase :Optional[int] = name.replace('''pretrained''' , '''dpt''' ) if "bn" in name: __UpperCamelCase :List[str] = name.replace('''bn''' , '''batch_norm''' ) if "head" in name: __UpperCamelCase :Tuple = name.replace('''head''' , '''head.head''' ) if "encoder.norm" in name: __UpperCamelCase :List[str] = name.replace('''encoder.norm''' , '''layernorm''' ) if "auxlayer" in name: __UpperCamelCase :Optional[Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' ) if "backbone" in name: __UpperCamelCase :int = name.replace('''backbone''' , '''backbone.bit.encoder''' ) if ".." 
in name: __UpperCamelCase :Tuple = name.replace('''..''' , '''.''' ) if "stem.conv" in name: __UpperCamelCase :List[Any] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' ) if "blocks" in name: __UpperCamelCase :Tuple = name.replace('''blocks''' , '''layers''' ) if "convolution" in name and "backbone" in name: __UpperCamelCase :List[Any] = name.replace('''convolution''' , '''conv''' ) if "layer" in name and "backbone" in name: __UpperCamelCase :Dict = name.replace('''layer''' , '''layers''' ) if "backbone.bit.encoder.bit" in name: __UpperCamelCase :Dict = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' ) if "embedder.conv" in name: __UpperCamelCase :Optional[Any] = name.replace('''embedder.conv''' , '''embedder.convolution''' ) if "backbone.bit.encoder.stem.norm" in name: __UpperCamelCase :List[Any] = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' ) return name def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __UpperCamelCase :Any = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" ) __UpperCamelCase :Dict = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __UpperCamelCase :Union[str, Any] = in_proj_weight[: config.hidden_size, :] __UpperCamelCase :Dict = in_proj_bias[: config.hidden_size] __UpperCamelCase :Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __UpperCamelCase :Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __UpperCamelCase :str = in_proj_weight[ -config.hidden_size :, : ] __UpperCamelCase :Union[str, Any] = in_proj_bias[-config.hidden_size :] def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :Any = 
'''http://images.cocodataset.org/val2017/000000039769.jpg''' __UpperCamelCase :List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase :Union[str, Any] = get_dpt_config(SCREAMING_SNAKE_CASE ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") __UpperCamelCase :Optional[Any] = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(SCREAMING_SNAKE_CASE ) # rename keys for key in state_dict.copy().keys(): __UpperCamelCase :Any = state_dict.pop(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Optional[int] = val # read in qkv matrices read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # load HuggingFace model __UpperCamelCase :Union[str, Any] = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) model.eval() # Check outputs on an image __UpperCamelCase :Any = 480 if '''ade''' in checkpoint_url else 384 __UpperCamelCase :Optional[Any] = DPTImageProcessor(size=SCREAMING_SNAKE_CASE ) __UpperCamelCase :Dict = prepare_img() __UpperCamelCase :Any = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) # forward pass __UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE ).logits if '''ade''' in checkpoint_url else model(**SCREAMING_SNAKE_CASE ).predicted_depth if show_prediction: __UpperCamelCase :str = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=SCREAMING_SNAKE_CASE , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if 
pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: model.push_to_hub('''ybelkada/dpt-hybrid-midas''' ) image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) parser.add_argument( '''--show_prediction''', action='''store_true''', ) __lowercase = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
452
0
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase__ = float('''nan''') class __snake_case : def __init__( self : int , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : str = sys.stdout _lowerCamelCase : List[Any] = open(__lowerCAmelCase , '''a''' ) def __getattr__( self : str , __lowerCAmelCase : List[str] ): """simple docstring""" return getattr(self.stdout , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[Any] ): """simple docstring""" self.stdout.write(__lowerCAmelCase ) # strip tqdm codes self.file.write(re.sub(R'''^.*\r''' , '''''' , __lowerCAmelCase , 0 , re.M ) ) def snake_case_ ( A_ : str=80, A_ : List[Any]=False ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [] # deal with critical env vars _lowerCamelCase : Union[str, Any] = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: _lowerCamelCase : str = os.environ.get(A_, A_ ) if val is not None: cmd.append(F'''{key}={val}''' ) # python executable (not always needed if the script is executable) _lowerCamelCase 
: int = sys.executable if full_python_path else sys.executable.split('''/''' )[-1] cmd.append(A_ ) # now the normal args cmd += list(map(shlex.quote, sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _lowerCamelCase : Tuple = [] _lowerCamelCase : int = '''''' while len(A_ ) > 0: current_line += F'''{cmd.pop(0 )} ''' if len(A_ ) == 0 or len(A_ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(A_ ) _lowerCamelCase : int = '''''' return "\\\n".join(A_ ) def snake_case_ ( A_ : Tuple, A_ : Optional[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = re.sub(R'''[\\\n]+''', ''' ''', args.base_cmd ) # remove --output_dir if any and set our own _lowerCamelCase : Union[str, Any] = re.sub('''--output_dir\s+[^\s]+''', '''''', args.base_cmd ) args.base_cmd += F''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir _lowerCamelCase : Optional[int] = re.sub('''--overwrite_output_dir\s+''', '''''', args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def snake_case_ ( A_ : int, A_ : List[Any], A_ : str, A_ : List[Any], A_ : Optional[int], A_ : Union[str, Any], A_ : List[str] ): '''simple docstring''' if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0, 1_00 ) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )}, ) _lowerCamelCase : List[str] = subprocess.run(A_, capture_output=A_, text=A_ ) if verbose: print('''STDOUT''', result.stdout ) print('''STDERR''', result.stderr ) # save the streams _lowerCamelCase : Union[str, Any] = variation.replace(''' ''', '''-''' ) with open(Path(A_ ) / F'''log.{prefix}.stdout.txt''', '''w''' ) as f: f.write(result.stdout ) with open(Path(A_ ) / F'''log.{prefix}.stderr.txt''', '''w''' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('''failed''' ) return {target_metric_key: nan} with 
io.open(F'''{output_dir}/all_results.json''', '''r''', encoding='''utf-8''' ) as f: _lowerCamelCase : str = json.load(A_ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def snake_case_ ( A_ : Dict, A_ : List[str], A_ : Optional[int], A_ : Any, A_ : List[Any], A_ : str, A_ : Union[str, Any], A_ : List[Any], A_ : Optional[Any], A_ : Any, ): '''simple docstring''' _lowerCamelCase : Tuple = [] _lowerCamelCase : List[str] = [] _lowerCamelCase : Optional[Any] = F'''{id}: {variation:<{longest_variation_len}}''' _lowerCamelCase : int = F'''{preamble}: ''' _lowerCamelCase : List[str] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(A_ ), desc=A_, leave=A_ ): _lowerCamelCase : Dict = process_run_single( A_, A_, A_, A_, A_, A_, A_ ) _lowerCamelCase : Dict = single_run_metrics[target_metric_key] if not math.isnan(A_ ): metrics.append(A_ ) results.append(A_ ) outcome += "✓" else: outcome += "✘" _lowerCamelCase : List[Any] = F'''\33[2K\r{outcome}''' if len(A_ ) > 0: _lowerCamelCase : str = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _lowerCamelCase : Optional[Any] = round(mean_metrics[target_metric_key], 2 ) _lowerCamelCase : List[Any] = F'''{outcome} {mean_target}''' if len(A_ ) > 1: results_str += F''' {tuple(round(A_, 2 ) for x in results )}''' print(A_ ) _lowerCamelCase : Optional[Any] = variation return mean_metrics else: print(A_ ) return {variation_key: variation, target_metric_key: nan} def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Optional[Any] = torch.cuda.get_device_properties(torch.device('''cuda''' ) ) return F''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def snake_case_ ( A_ : Union[str, 
Any], A_ : str, A_ : int, A_ : int, A_ : Any ): '''simple docstring''' _lowerCamelCase : str = pd.DataFrame(A_ ) _lowerCamelCase : Optional[Any] = '''variation''' _lowerCamelCase : List[Any] = '''diff_%''' _lowerCamelCase : Optional[int] = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _lowerCamelCase : Optional[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(A_ ): # as a fallback, use the minimal value as the sentinel _lowerCamelCase : str = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(A_ ): _lowerCamelCase : str = df.apply( lambda A_ : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0, axis='''columns''', ) # re-order columns _lowerCamelCase : str = [variation_key, target_metric_key, diff_key, *report_metric_keys] _lowerCamelCase : Tuple = df.reindex(A_, axis='''columns''' ) # reorder cols # capitalize _lowerCamelCase : Any = df.rename(str.capitalize, axis='''columns''' ) # make the cols as narrow as possible _lowerCamelCase : int = df.rename(lambda A_ : c.replace('''_''', '''<br>''' ), axis='''columns''' ) _lowerCamelCase : int = df.rename(lambda A_ : c.replace('''_''', '''\n''' ), axis='''columns''' ) _lowerCamelCase : str = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=A_, floatfmt='''.2f''' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=A_, floatfmt='''.2f''' )] print('''\n\n'''.join(A_ ) ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase 
: Optional[int] = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''', default=A_, type=A_, required=A_, help='''Base cmd''', ) parser.add_argument( '''--variations''', default=A_, type=A_, nargs='''+''', required=A_, help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''', ) parser.add_argument( '''--base-variation''', default=A_, type=A_, help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''', ) parser.add_argument( '''--target-metric-key''', default=A_, type=A_, required=A_, help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''', ) parser.add_argument( '''--report-metric-keys''', default='''''', type=A_, help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''', ) parser.add_argument( '''--repeat-times''', default=1, type=A_, help='''How many times to re-run each variation - an average will be reported''', ) parser.add_argument( '''--output_dir''', default='''output_benchmark''', type=A_, help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''', ) parser.add_argument( '''--verbose''', default=A_, action='''store_true''', help='''Whether to show the outputs of each run or just the benchmark progress''', ) _lowerCamelCase : Union[str, Any] = parser.parse_args() _lowerCamelCase : List[str] = args.output_dir Path(A_ ).mkdir(exist_ok=A_ ) _lowerCamelCase : List[Any] = get_base_command(A_, A_ ) # split each dimension into its --foo variations _lowerCamelCase : Tuple = [list(map(str.strip, re.split(R'''\|''', A_ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _lowerCamelCase : 
str = list(map(str.strip, map(''' '''.join, itertools.product(*A_ ) ) ) ) _lowerCamelCase : Optional[Any] = max(len(A_ ) for x in variations ) # split wanted keys _lowerCamelCase : List[str] = args.report_metric_keys.split() # capture prints into a log file for convenience _lowerCamelCase : List[str] = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(F'''and this script\'s output is also piped into {report_fn}''' ) _lowerCamelCase : Tuple = Tee(A_ ) print(F'''\n*** Running {len(A_ )} benchmarks:''' ) print(F'''Base command: {" ".join(A_ )}''' ) _lowerCamelCase : Tuple = '''variation''' _lowerCamelCase : Optional[Any] = [] for id, variation in enumerate(tqdm(A_, desc='''Total completion: ''', leave=A_ ) ): _lowerCamelCase : Dict = base_cmd + variation.split() results.append( process_run( id + 1, A_, A_, A_, A_, args.target_metric_key, A_, args.repeat_times, A_, args.verbose, ) ) process_results(A_, args.target_metric_key, A_, args.base_variation, A_ ) if __name__ == "__main__": main()
83
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=8 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=16 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=36 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ): """simple docstring""" _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_input_mask _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_labels _snake_case = num_choices _snake_case = scope def lowerCamelCase ( self ): """simple docstring""" _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_input_mask: _snake_case = 
random_attention_mask([self.batch_size, self.seq_length] ) _snake_case = None if self.use_token_type_ids: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case = None _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case = ids_tensor([self.batch_size] , self.num_choices ) _snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.get_config() _snake_case = 3_00 return config def lowerCamelCase ( self ): """simple docstring""" ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = self.prepare_config_and_inputs() _snake_case = True _snake_case = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 
lowerCAmelCase_ ): """simple docstring""" _snake_case = MraModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ): """simple docstring""" _snake_case = True _snake_case = MraModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , ) _snake_case = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , ) _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case = MraForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple 
docstring""" _snake_case = MraForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case = self.num_labels _snake_case = MraForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case = self.num_labels _snake_case = MraForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case = self.num_choices _snake_case = MraForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , 
-1 ).contiguous() _snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _snake_case = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = config_and_inputs _snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ): __lowercase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __lowercase = False __lowercase = False __lowercase = False __lowercase = False __lowercase = () def lowerCamelCase ( self ): """simple docstring""" _snake_case = MraModelTester(self ) _snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def lowerCamelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _snake_case = type self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ ) @slow def lowerCamelCase ( self ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = MraModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @unittest.skip(reason='MRA does not output attentions' ) def lowerCamelCase ( self ): """simple docstring""" return @require_torch class __UpperCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): """simple docstring""" _snake_case = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) _snake_case = torch.arange(2_56 ).unsqueeze(0 ) with torch.no_grad(): _snake_case = model(lowerCAmelCase_ )[0] _snake_case = torch.Size((1, 2_56, 7_68) ) self.assertEqual(output.shape , lowerCAmelCase_ ) _snake_case = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): """simple docstring""" _snake_case = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) _snake_case = torch.arange(2_56 ).unsqueeze(0 ) with torch.no_grad(): _snake_case = model(lowerCAmelCase_ )[0] _snake_case = 5_02_65 _snake_case = torch.Size((1, 2_56, vocab_size) ) self.assertEqual(output.shape , lowerCAmelCase_ ) _snake_case = 
torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): """simple docstring""" _snake_case = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) _snake_case = torch.arange(40_96 ).unsqueeze(0 ) with torch.no_grad(): _snake_case = model(lowerCAmelCase_ )[0] _snake_case = 5_02_65 _snake_case = torch.Size((1, 40_96, vocab_size) ) self.assertEqual(output.shape , lowerCAmelCase_ ) _snake_case = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
495
0
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification _lowerCAmelCase = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co _lowerCAmelCase = "main" # Default branch name _lowerCAmelCase = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2" # One particular commit (not the top of `main`) _lowerCAmelCase = "aaaaaaa" # This commit does not exist, so we should 404. 
_lowerCAmelCase = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684" # Sha-1 of config.json on the top of `main`, for checking purposes _lowerCAmelCase = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3" @contextlib.contextmanager def _snake_case ( ): print('''Welcome!''' ) yield print('''Bye!''' ) @contextlib.contextmanager def _snake_case ( ): print('''Bonjour!''' ) yield print('''Au revoir!''' ) class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Dict ): assert transformers.__spec__ is not None assert importlib.util.find_spec('''transformers''' ) is not None class lowerCAmelCase_ ( unittest.TestCase ): @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def UpperCamelCase_ ( self : Union[str, Any] , _A : str ): with ContextManagers([] ): print('''Transformers are awesome!''' ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def UpperCamelCase_ ( self : int , _A : List[str] ): with ContextManagers([context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def UpperCamelCase_ ( self : Tuple , _A : Tuple ): with ContextManagers([context_fr(), context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' ) @require_torch def UpperCamelCase_ ( self : int ): self.assertEqual(find_labels(_A ) , ['''labels'''] ) self.assertEqual(find_labels(_A ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(_A ) , ['''start_positions''', 
'''end_positions'''] ) class lowerCAmelCase_ ( a__ ): pass self.assertEqual(find_labels(_A ) , ['''labels'''] ) @require_tf def UpperCamelCase_ ( self : Dict ): self.assertEqual(find_labels(_A ) , ['''labels'''] ) self.assertEqual(find_labels(_A ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(_A ) , ['''start_positions''', '''end_positions'''] ) class lowerCAmelCase_ ( a__ ): pass self.assertEqual(find_labels(_A ) , ['''labels'''] ) @require_flax def UpperCamelCase_ ( self : str ): self.assertEqual(find_labels(_A ) , [] ) self.assertEqual(find_labels(_A ) , [] ) self.assertEqual(find_labels(_A ) , [] ) class lowerCAmelCase_ ( a__ ): pass self.assertEqual(find_labels(_A ) , [] )
716
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() # fmt: off _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCamelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _UpperCamelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def 
UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_image_processor() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = image_processor(_A , return_tensors='''np''' ) _UpperCamelCase = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): 
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = processor(text=_A ) _UpperCamelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_A ): processor() def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase = processor.batch_decode(_A ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
71
0
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict ) -> Optional[int]: _A = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: _A = 128 elif "12-12" in model_name: _A = 12 _A = 12 elif "14-14" in model_name: _A = 14 _A = 14 elif "16-16" in model_name: _A = 16 _A = 16 else: raise ValueError('''Model not supported''' ) _A = '''huggingface/label-files''' if "speech-commands" in model_name: _A = 35 _A = '''speech-commands-v2-id2label.json''' else: _A = 527 _A = '''audioset-id2label.json''' _A = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) ) _A = {int(_snake_case ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any] ) -> Tuple: if "module.v" in name: _A = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: _A = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: _A = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: _A = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: _A = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: _A = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: _A = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: _A = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: _A = name.replace('''norm1''' , 
'''layernorm_before''' ) if "norm2" in name: _A = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: _A = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: _A = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: _A = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: _A = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: _A = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :List[str] ) -> List[Any]: for key in orig_state_dict.copy().keys(): _A = orig_state_dict.pop(_snake_case ) if "qkv" in key: _A = key.split('''.''' ) _A = int(key_split[3] ) _A = config.hidden_size if "weight" in key: _A = val[:dim, :] _A = val[dim : dim * 2, :] _A = val[-dim:, :] else: _A = val[:dim] _A = val[dim : dim * 2] _A = val[-dim:] else: _A = val return orig_state_dict def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> Dict: _A = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :Dict , _snake_case :List[Any]=False ) -> Optional[Any]: _A = get_audio_spectrogram_transformer_config(_snake_case ) _A = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( 
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict _A = model_name_to_url[model_name] _A = torch.hub.load_state_dict_from_url(_snake_case , map_location='''cpu''' ) # remove some keys remove_keys(_snake_case ) # rename some keys _A = convert_state_dict(_snake_case , _snake_case ) # load 🤗 model _A = ASTForAudioClassification(_snake_case ) model.eval() model.load_state_dict(_snake_case ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 _A = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978 _A = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526 _A = 1_024 if '''speech-commands''' not in model_name else 128 _A = ASTFeatureExtractor(mean=_snake_case , std=_snake_case , max_length=_snake_case ) if "speech-commands" in model_name: _A = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) _A = dataset[0]['''audio''']['''array'''] else: _A = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) _A , _A = torchaudio.load(_snake_case ) _A = waveform.squeeze().numpy() _A = feature_extractor(_snake_case , sampling_rate=16_000 , return_tensors='''pt''' ) # forward pass _A = model(**_snake_case ) _A = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": _A = 
torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": _A = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": _A = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": _A = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": _A = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": _A = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": _A = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": _A = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , _snake_case , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(_snake_case ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 
hub.""" ) UpperCAmelCase_ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
2
UpperCAmelCase_ = 0 # The first color of the flag. UpperCAmelCase_ = 1 # The second color of the flag. UpperCAmelCase_ = 2 # The third color of the flag. UpperCAmelCase_ = (red, white, blue) def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> list: if not sequence: return [] if len(_snake_case ) == 1: return list(_snake_case ) _A = 0 _A = len(_snake_case ) - 1 _A = 0 while mid <= high: if sequence[mid] == colors[0]: _A , _A = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _A , _A = sequence[high], sequence[mid] high -= 1 else: _A = F'''The elements inside the sequence must contains only {colors} values''' raise ValueError(_snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = input("""Enter numbers separated by commas:\n""").strip() UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")] print(f'{dutch_national_flag_sort(unsorted)}')
2
1
'''simple docstring''' from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json""" ), } class a__ ( __lowerCAmelCase ): __magic_name__ : str = '''xlm-prophetnet''' __magic_name__ : Tuple = ['''past_key_values'''] __magic_name__ : Any = { '''num_attention_heads''': '''num_encoder_attention_heads''', } def __init__(self : Optional[Any], __UpperCAmelCase : int = 0.1, __UpperCAmelCase : Tuple = "gelu", __UpperCAmelCase : Union[str, Any] = 30522, __UpperCAmelCase : Optional[int] = 1024, __UpperCAmelCase : Tuple = 4096, __UpperCAmelCase : Union[str, Any] = 12, __UpperCAmelCase : int = 16, __UpperCAmelCase : Dict = 4096, __UpperCAmelCase : Optional[Any] = 12, __UpperCAmelCase : Optional[int] = 16, __UpperCAmelCase : Union[str, Any] = 0.1, __UpperCAmelCase : Tuple = 0.1, __UpperCAmelCase : int = 512, __UpperCAmelCase : List[str] = 0.02, __UpperCAmelCase : Tuple = True, __UpperCAmelCase : Optional[Any] = True, __UpperCAmelCase : Union[str, Any] = 0, __UpperCAmelCase : List[Any] = 2, __UpperCAmelCase : Tuple = 32, __UpperCAmelCase : int = 128, __UpperCAmelCase : Any = False, __UpperCAmelCase : Any = 0.0, __UpperCAmelCase : Optional[int] = True, __UpperCAmelCase : List[str] = 0, __UpperCAmelCase : Tuple = 1, __UpperCAmelCase : Union[str, Any] = 2, **__UpperCAmelCase : Tuple, ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_size SCREAMING_SNAKE_CASE : List[str] = encoder_ffn_dim SCREAMING_SNAKE_CASE : Optional[Any] = num_encoder_layers SCREAMING_SNAKE_CASE : Tuple = num_encoder_attention_heads SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim SCREAMING_SNAKE_CASE : str = num_decoder_layers SCREAMING_SNAKE_CASE : Any = 
num_decoder_attention_heads SCREAMING_SNAKE_CASE : int = max_position_embeddings SCREAMING_SNAKE_CASE : Any = init_std # Normal(0, this parameter) SCREAMING_SNAKE_CASE : Dict = activation_function # parameters for xlmprophetnet SCREAMING_SNAKE_CASE : List[Any] = ngram SCREAMING_SNAKE_CASE : List[Any] = num_buckets SCREAMING_SNAKE_CASE : List[str] = relative_max_distance SCREAMING_SNAKE_CASE : int = disable_ngram_loss SCREAMING_SNAKE_CASE : Union[str, Any] = eps # 3 Types of Dropout SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout SCREAMING_SNAKE_CASE : Dict = activation_dropout SCREAMING_SNAKE_CASE : Any = dropout SCREAMING_SNAKE_CASE : str = use_cache super().__init__( pad_token_id=lowerCAmelCase_, bos_token_id=lowerCAmelCase_, eos_token_id=lowerCAmelCase_, is_encoder_decoder=lowerCAmelCase_, add_cross_attention=lowerCAmelCase_, decoder_start_token_id=lowerCAmelCase_, **lowerCAmelCase_, ) @property def lowercase__ (self : Any ) -> int: """simple docstring""" return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def lowercase__ (self : List[Any], __UpperCAmelCase : List[Any] ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and''' ''' `num_decoder_layers`.''' )
717
'''simple docstring''' def __lowercase (_SCREAMING_SNAKE_CASE :int ): SCREAMING_SNAKE_CASE : Tuple = 1 for i in range(1 , num + 1 ): fact *= i return fact def __lowercase (_SCREAMING_SNAKE_CASE :int ): SCREAMING_SNAKE_CASE : List[Any] = 0 while number > 0: SCREAMING_SNAKE_CASE : List[str] = number % 10 sum_of_digits += last_digit SCREAMING_SNAKE_CASE : List[str] = number // 10 # Removing the last_digit from the given number return sum_of_digits def __lowercase (_SCREAMING_SNAKE_CASE :int = 1_00 ): SCREAMING_SNAKE_CASE : List[Any] = factorial(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[str] = split_and_add(_SCREAMING_SNAKE_CASE ) return result if __name__ == "__main__": print(solution(int(input("""Enter the Number: """).strip())))
355
0
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json", } class snake_case ( UpperCamelCase_ ): lowercase_ = 'gpt_neox_japanese' def __init__( self : Any , a_ : Optional[int]=3_2000 , a_ : List[Any]=2560 , a_ : Any=32 , a_ : Union[str, Any]=32 , a_ : Any=4 , a_ : List[Any]="gelu" , a_ : Optional[int]=1.00 , a_ : int=1_0000 , a_ : Optional[int]=2048 , a_ : str=0.02 , a_ : Any=1e-5 , a_ : Dict=True , a_ : Tuple=3_1996 , a_ : Any=3_1999 , a_ : str=0.1 , a_ : Optional[int]=0.0 , **a_ : List[Any] , )-> List[str]: """simple docstring""" super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : str = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE__ : Any = hidden_size SCREAMING_SNAKE_CASE__ : Dict = num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_multiple_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : int = rotary_pct SCREAMING_SNAKE_CASE__ : Tuple = rotary_emb_base SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE__ : Optional[Any] = layer_norm_eps SCREAMING_SNAKE_CASE__ : Tuple = use_cache SCREAMING_SNAKE_CASE__ : str = attention_dropout SCREAMING_SNAKE_CASE__ : int = hidden_dropout
85
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ : Dict = logging.get_logger(__name__) a_ : Any = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } a_ : Union[str, Any] = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } a_ : List[Any] = {'facebook/blenderbot_small-90M': 5_12} def __a ( __UpperCAmelCase ): a__ = set() a__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) a__ = char a__ = set(__UpperCAmelCase ) return pairs class __UpperCamelCase ( _lowercase ): """simple docstring""" _lowercase : Union[str, Any] = VOCAB_FILES_NAMES _lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : int = ['''input_ids''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="__start__" , SCREAMING_SNAKE_CASE="__end__" , SCREAMING_SNAKE_CASE="__unk__" , SCREAMING_SNAKE_CASE="__null__" , **SCREAMING_SNAKE_CASE , ) -> List[str]: super().__init__(unk_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle: a__ = json.load(SCREAMING_SNAKE_CASE ) a__ = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle: a__ = merges_handle.read().split('''\n''' )[1:-1] a__ = 
[tuple(merge.split() ) for merge in merges] a__ = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) a__ = {} @property def _UpperCAmelCase ( self ) -> int: return len(self.encoder ) def _UpperCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: if token in self.cache: return self.cache[token] a__ = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE ) a__ = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE ) a__ = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE ) if "\n" in token: a__ = token.replace('''\n''' , ''' __newln__''' ) a__ = token.split(''' ''' ) a__ = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE ): continue a__ = token.lower() a__ = tuple(SCREAMING_SNAKE_CASE ) a__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) a__ = get_pairs(SCREAMING_SNAKE_CASE ) if not pairs: words.append(SCREAMING_SNAKE_CASE ) continue while True: a__ = min(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : self.bpe_ranks.get(SCREAMING_SNAKE_CASE , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break a__ , a__ = bigram a__ = [] a__ = 0 while i < len(SCREAMING_SNAKE_CASE ): try: a__ = word.index(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) new_word.extend(word[i:j] ) a__ = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 a__ = tuple(SCREAMING_SNAKE_CASE ) a__ = new_word if len(SCREAMING_SNAKE_CASE ) == 1: break else: a__ = get_pairs(SCREAMING_SNAKE_CASE ) a__ = '''@@ '''.join(SCREAMING_SNAKE_CASE ) a__ = word[:-4] a__ = word words.append(SCREAMING_SNAKE_CASE ) return " ".join(SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: a__ = [] a__ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE ) for token in words: 
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE ).split(''' ''' ) ) ) return split_tokens def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: a__ = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token ) def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: a__ = ''' '''.join(SCREAMING_SNAKE_CASE ).replace('''@@ ''' , '''''' ).strip() return out_string def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return a__ = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) a__ = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE , ensure_ascii=SCREAMING_SNAKE_CASE ) + '''\n''' ) a__ = 0 with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ''' Please check that the tokenizer is not corrupted!''' ) a__ = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE ) + '''\n''' ) index += 1 return vocab_file, merge_file
194
0
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def UpperCamelCase_ ( lowerCamelCase : ndarray ) -> float: """simple docstring""" return np.dot(lowerCamelCase , lowerCamelCase ) class _UpperCamelCase : """simple docstring""" def __init__( self : Any , *, snake_case : float = np.inf , snake_case : str = "linear" , snake_case : float = 0.0 , ) -> None: '''simple docstring''' __magic_name__ : Optional[int] = regularization __magic_name__ : Dict = gamma if kernel == "linear": __magic_name__ : Dict = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('''gamma must be float or int''' ) if not self.gamma > 0: raise ValueError('''gamma must be > 0''' ) __magic_name__ : int = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: __magic_name__ : List[str] = f"""Unknown kernel: {kernel}""" raise ValueError(snake_case ) def _UpperCAmelCase ( self : str , snake_case : ndarray , snake_case : ndarray ) -> float: '''simple docstring''' return np.dot(snake_case , snake_case ) def _UpperCAmelCase ( self : List[str] , snake_case : ndarray , snake_case : ndarray ) -> float: '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def _UpperCAmelCase ( self : Tuple , snake_case : list[ndarray] , snake_case : ndarray ) -> None: '''simple docstring''' __magic_name__ : List[str] = observations __magic_name__ : List[Any] = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . 
xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((__magic_name__) , ) : Union[str, Any] = np.shape(snake_case ) def to_minimize(snake_case : ndarray ) -> float: __magic_name__ : Union[str, Any] = 0 ((__magic_name__) , ) : Tuple = np.shape(snake_case ) for i in range(snake_case ): for j in range(snake_case ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(snake_case ) __magic_name__ : Optional[Any] = LinearConstraint(snake_case , 0 , 0 ) __magic_name__ : Union[str, Any] = Bounds(0 , self.regularization ) __magic_name__ : Optional[Any] = minimize( snake_case , np.ones(snake_case ) , bounds=snake_case , constraints=[ly_contraint] ).x __magic_name__ : str = l_star # calculating mean offset of separation plane to points __magic_name__ : Any = 0 for i in range(snake_case ): for j in range(snake_case ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) __magic_name__ : str = s / n def _UpperCAmelCase ( self : str , snake_case : ndarray ) -> int: '''simple docstring''' __magic_name__ : str = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , snake_case ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
147
"""simple docstring""" from math import loga def UpperCamelCase_ ( lowerCamelCase : int ) -> int: """simple docstring""" if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError('''Input value must be a \'int\' type''' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
147
1
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" lowercase_ : List[Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: lowercase_ : Dict = 128 elif "12-12" in model_name: lowercase_ : Optional[int] = 12 lowercase_ : Any = 12 elif "14-14" in model_name: lowercase_ : Tuple = 14 lowercase_ : List[str] = 14 elif "16-16" in model_name: lowercase_ : List[str] = 16 lowercase_ : Optional[Any] = 16 else: raise ValueError("Model not supported" ) lowercase_ : Optional[Any] = 'huggingface/label-files' if "speech-commands" in model_name: lowercase_ : Optional[Any] = 35 lowercase_ : Optional[Any] = 'speech-commands-v2-id2label.json' else: lowercase_ : Union[str, Any] = 527 lowercase_ : Tuple = 'audioset-id2label.json' lowercase_ : Optional[Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) lowercase_ : str = {int(__lowercase ): v for k, v in idalabel.items()} lowercase_ : Dict = idalabel lowercase_ : List[Any] = {v: k for k, v in idalabel.items()} return config def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" if "module.v" in name: lowercase_ : List[str] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: lowercase_ : Tuple = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: lowercase_ : Tuple = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: lowercase_ : Optional[Any] = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: lowercase_ : 
Optional[int] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: lowercase_ : str = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: lowercase_ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowercase_ : Optional[Any] = name.replace("attn" , "attention.self" ) if "norm1" in name: lowercase_ : Optional[int] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowercase_ : str = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: lowercase_ : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowercase_ : Dict = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: lowercase_ : List[Any] = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: lowercase_ : int = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: lowercase_ : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowercase_ : Any = orig_state_dict.pop(__lowercase ) if "qkv" in key: lowercase_ : Optional[Any] = key.split("." 
) lowercase_ : List[str] = int(key_split[3] ) lowercase_ : Optional[int] = config.hidden_size if "weight" in key: lowercase_ : Optional[int] = val[:dim, :] lowercase_ : Union[str, Any] = val[dim : dim * 2, :] lowercase_ : Dict = val[-dim:, :] else: lowercase_ : Optional[int] = val[:dim] lowercase_ : Optional[int] = val[dim : dim * 2] lowercase_ : Optional[int] = val[-dim:] else: lowercase_ : Any = val return orig_state_dict def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" lowercase_ : Tuple = [ 'module.v.head.weight', 'module.v.head.bias', 'module.v.head_dist.weight', 'module.v.head_dist.bias', ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) @torch.no_grad() def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ): """simple docstring""" lowercase_ : Any = get_audio_spectrogram_transformer_config(__lowercase ) lowercase_ : Optional[int] = { 'ast-finetuned-audioset-10-10-0.4593': ( 'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.450': ( 'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448': ( 'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448-v2': ( 'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1' ), 'ast-finetuned-audioset-12-12-0.447': ( 'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1' ), 'ast-finetuned-audioset-14-14-0.443': ( 'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1' ), 'ast-finetuned-audioset-16-16-0.442': ( 'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1' ), 'ast-finetuned-speech-commands-v2': ( 'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1' ), } # load original state_dict lowercase_ : Union[str, Any] = model_name_to_url[model_name] lowercase_ : str = 
torch.hub.load_state_dict_from_url(__lowercase , map_location="cpu" ) # remove some keys remove_keys(__lowercase ) # rename some keys lowercase_ : Dict = convert_state_dict(__lowercase , __lowercase ) # load 🤗 model lowercase_ : Optional[Any] = ASTForAudioClassification(__lowercase ) model.eval() model.load_state_dict(__lowercase ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 lowercase_ : List[Any] = -4.2677393 if 'speech-commands' not in model_name else -6.845978 lowercase_ : Optional[Any] = 4.5689974 if 'speech-commands' not in model_name else 5.5654526 lowercase_ : Union[str, Any] = 1024 if 'speech-commands' not in model_name else 128 lowercase_ : Tuple = ASTFeatureExtractor(mean=__lowercase , std=__lowercase , max_length=__lowercase ) if "speech-commands" in model_name: lowercase_ : Any = load_dataset("speech_commands" , "v0.02" , split="validation" ) lowercase_ : Optional[int] = dataset[0]['audio']['array'] else: lowercase_ : Any = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) lowercase_ : int = torchaudio.load(__lowercase ) lowercase_ : Any = waveform.squeeze().numpy() lowercase_ : Dict = feature_extractor(__lowercase , sampling_rate=1_6000 , return_tensors="pt" ) # forward pass lowercase_ : Optional[Any] = model(**__lowercase ) lowercase_ : List[Any] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": lowercase_ : List[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": lowercase_ : Dict = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": lowercase_ : Any = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": lowercase_ : Optional[Any] = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == 
"ast-finetuned-audioset-12-12-0.447": lowercase_ : int = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": lowercase_ : List[str] = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": lowercase_ : List[Any] = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": lowercase_ : int = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 ): raise ValueError("Logits don\'t match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(__lowercase ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F"""MIT/{model_name}""" ) feature_extractor.push_to_hub(F"""MIT/{model_name}""" ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCamelCase__ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
620
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) _UpperCAmelCase = pytest.mark.integration @pytest.mark.parametrize('path' ,['paws', 'csv'] ) def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Tuple ): '''simple docstring''' inspect_dataset(__lowercase ,__lowercase ) A_ : Optional[Any] = path + '.py' assert script_name in os.listdir(__lowercase ) assert "__pycache__" not in os.listdir(__lowercase ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' ,['accuracy'] ) def UpperCamelCase ( __lowercase : Any ,__lowercase : Union[str, Any] ): '''simple docstring''' inspect_metric(__lowercase ,__lowercase ) A_ : Optional[Any] = path + '.py' assert script_name in os.listdir(__lowercase ) assert "__pycache__" not in os.listdir(__lowercase ) @pytest.mark.parametrize( 'path, config_name, expected_splits' ,[ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] ,) def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Dict ): '''simple docstring''' A_ : List[Any] = get_dataset_config_info(__lowercase ,config_name=__lowercase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' ,[ ('paws', None, ValueError), ] ,) def UpperCamelCase ( __lowercase : Dict ,__lowercase : List[Any] ,__lowercase : int ): '''simple docstring''' with pytest.raises(__lowercase ): get_dataset_config_info(__lowercase ,config_name=__lowercase ) @pytest.mark.parametrize( 'path, expected' ,[ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), 
('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] ,) def UpperCamelCase ( __lowercase : str ,__lowercase : str ): '''simple docstring''' A_ : Any = get_dataset_config_names(__lowercase ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' ,[ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] ,) def UpperCamelCase ( __lowercase : Tuple ,__lowercase : str ,__lowercase : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = get_dataset_infos(__lowercase ) assert list(infos.keys() ) == expected_configs A_ : Any = expected_configs[0] assert expected_config in infos A_ : Tuple = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' ,[ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] ,) def UpperCamelCase ( __lowercase : Any ,__lowercase : Optional[Any] ,__lowercase : Dict ): '''simple docstring''' A_ : Optional[Any] = get_dataset_infos(__lowercase ) assert expected_config in infos A_ : str = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' ,[ ('paws', None, ValueError), ] ,) def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Tuple ,__lowercase : str ): '''simple docstring''' with pytest.raises(__lowercase ): get_dataset_split_names(__lowercase ,config_name=__lowercase )
558
0
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin A_ = get_tests_dir("fixtures/test_sentencepiece.model") A_ = {"target_lang": "fi", "source_lang": "en"} A_ = ">>zh<<" A_ = "Helsinki-NLP/" if is_torch_available(): A_ = "pt" elif is_tf_available(): A_ = "tf" else: A_ = "jax" @require_sentencepiece class _snake_case ( _a , unittest.TestCase ): _A : Any = MarianTokenizer _A : int = False _A : Union[str, Any] = True def __UpperCamelCase ( self : Union[str, Any] ): super().setUp() SCREAMING_SNAKE_CASE:Any = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] SCREAMING_SNAKE_CASE:Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE__ ,range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) SCREAMING_SNAKE_CASE:str = Path(self.tmpdirname ) save_json(SCREAMING_SNAKE_CASE__ ,save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(SCREAMING_SNAKE_CASE__ ,save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(SCREAMING_SNAKE_CASE__ ,save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(SCREAMING_SNAKE_CASE__ ,save_dir / VOCAB_FILES_NAMES["target_spm"] ) SCREAMING_SNAKE_CASE:Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self : Any ,**SCREAMING_SNAKE_CASE__ : List[str] ): return MarianTokenizer.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ): return ( "This is a test", "This is 
a test", ) def __UpperCamelCase ( self : Dict ): SCREAMING_SNAKE_CASE:Optional[int] = "</s>" SCREAMING_SNAKE_CASE:Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : Tuple ): SCREAMING_SNAKE_CASE:List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"</s>" ) self.assertEqual(vocab_keys[1] ,"<unk>" ) self.assertEqual(vocab_keys[-1] ,"<pad>" ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,9 ) def __UpperCamelCase ( self : int ): self.assertEqual(self.get_tokenizer().vocab_size ,9 ) def __UpperCamelCase ( self : List[str] ): SCREAMING_SNAKE_CASE:Optional[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) SCREAMING_SNAKE_CASE:Union[str, Any] = en_de_tokenizer(["I am a small frog"] ,return_tensors=SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Any = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(SCREAMING_SNAKE_CASE__ ,batch.input_ids[0] ) SCREAMING_SNAKE_CASE:Tuple = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Optional[Any] = [x.name for x in Path(SCREAMING_SNAKE_CASE__ ).glob("*" )] self.assertIn("source.spm" ,SCREAMING_SNAKE_CASE__ ) MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : Dict ): SCREAMING_SNAKE_CASE:List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE:Dict = tok( ["I am a small frog" * 1_000, "I am a small frog"] ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) self.assertEqual(batch.input_ids.shape ,(2, 512) ) def __UpperCamelCase ( self : Optional[int] ): SCREAMING_SNAKE_CASE:Optional[Any] = 
self.get_tokenizer() SCREAMING_SNAKE_CASE:Tuple = tok(["I am a tiny frog", "I am a small frog"] ,padding=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) ) @slow def __UpperCamelCase ( self : str ): # fmt: off SCREAMING_SNAKE_CASE:List[str] = {"input_ids": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ ,model_name="Helsinki-NLP/opus-mt-en-de" ,revision="1a8c2263da11e68e50938f97e10cd57820bd504c" ,decode_kwargs={"use_source_tokenizer": True} ,) def __UpperCamelCase ( self : List[str] ): SCREAMING_SNAKE_CASE:Union[str, Any] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) SCREAMING_SNAKE_CASE:int = "Tämä on testi" SCREAMING_SNAKE_CASE:str = "This is a test" SCREAMING_SNAKE_CASE:int = [76, 7, 2_047, 2] 
SCREAMING_SNAKE_CASE:Dict = [69, 12, 11, 940, 2] SCREAMING_SNAKE_CASE:Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Dict = tokenizer(text_target=SCREAMING_SNAKE_CASE__ ).input_ids self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Dict = tokenizer.decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
715
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup A_ = [ "kernels/rwkv/wkv_cuda.cu", "kernels/rwkv/wkv_op.cpp", "kernels/deformable_detr/ms_deform_attn.h", "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh", "models/graphormer/algos_graphormer.pyx", ] def A_ ( snake_case ): # Test all the extensions added in the setup for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.") A_ = parser.parse_args() if args.check_lib: A_ = importlib.import_module("transformers") A_ = Path(transformers_module.__file__).parent else: A_ = Path.cwd() / "build/lib/transformers" if not test_custom_files_are_present(transformers_path): raise ValueError("The built release does not contain the custom files. Fix this before going further!")
465
0
"""Lazy-loading package init for the ByT5 tokenizer."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
# NOTE(review): the original bound this dict to a throw-away name while passing
# the undefined ``_import_structure`` to _LazyModule (NameError), and assigned
# the lazy module to a local instead of installing it in ``sys.modules``.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer submodule is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
523
"""Circle sort: recursively compare/swap mirrored pairs until no swap occurs."""


def circle_sort(collection: list) -> list:
    """Sort *collection* in place with circle sort and return it.

    Args:
        collection: mutable list of comparable items (may be empty).

    Returns:
        The same list, sorted ascending.

    NOTE(review): the original defined the function as ``UpperCamelCase__`` while
    ``__main__`` called ``circle_sort``, assigned the element swaps to throw-away
    names (so nothing was ever swapped), and read the never-bound
    ``is_not_sorted`` — all restored here.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One circle pass over collection[low:high+1]; True if any swap happened."""
        swapped = False
        if low == high:
            return swapped

        left = low
        right = high
        # Compare elements mirrored around the centre of the span.
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = collection[right], collection[left]
                swapped = True
            left += 1
            right -= 1

        # Odd-length span: the pointers met on the middle element, compare it
        # with its right neighbour (right + 1 <= high here).
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = collection[right + 1], collection[left]
            swapped = True

        # Recurse into both halves; report whether any level swapped.
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    # Repeat full passes until a pass makes no swap.
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
523
1
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class SamProcessor(ProcessorMixin):
    """
    Wraps a ``SamImageProcessor`` and normalizes prompt inputs (points, labels,
    bounding boxes) into the image processor's resized coordinate frame.

    NOTE(review): the original block was destroyed by an obfuscation pass — it
    inherited from the undefined ``__lowercase``, declared both class attributes
    under the same name, passed the undefined placeholder ``_A`` everywhere and
    discarded every result into throw-away names. This restores working code
    with the exact control flow the mangled block still showed.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        # Fill value used when ragged point lists are padded to a rectangle.
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Preprocess *images*, then validate/normalize the prompt inputs."""
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        """Rescale prompts to the resized frame and convert them to tensors."""
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # Single original size for all prompts: broadcast the first one.
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad ragged point lists (and their labels) up to the largest point count."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """Rescale (x, y) coordinates from *original_size* to the preprocessed size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            # An (x1, y1, x2, y2) box is treated as two corner points.
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        """Validate prompt nesting and convert the inner lists to numpy arrays."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        # dict.fromkeys de-duplicates while preserving order.
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        # Thin delegation to the image processor.
        return self.image_processor.post_process_masks(*args, **kwargs)
705
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    """Return True iff SageMaker model parallelism is configured and installed.

    NOTE(review): the original defined this as ``def a():`` while every call
    site used ``is_sagemaker_model_parallel_available`` (NameError at import),
    and the device/rank assignments below were discarded into throw-away names.
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """TrainingArguments variant with SageMaker model/data parallel device setup."""

    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Select the torch device for this process and record the GPU count."""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        # Under model parallelism the data-parallel group size is the world size.
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # smp places the model itself; the Trainer must not move it.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
241
0
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __snake_case : def __init__( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = '''''' _lowerCamelCase : str = '''''' _lowerCamelCase : Any = [] _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : Tuple = 2_5_6 _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : str = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Dict = 0 def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : List[Any] = cva.imread(__lowerCAmelCase , 0 ) _lowerCamelCase : Tuple = copy.deepcopy(self.img ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label='''x''' ) _lowerCamelCase : Dict = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): _lowerCamelCase : Tuple = x[i] / self.k self.sk += prk _lowerCamelCase : str = (self.L - 1) * self.sk if self.rem != 0: _lowerCamelCase : List[Any] = int(last % last ) _lowerCamelCase : Optional[Any] = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size ) _lowerCamelCase : Optional[int] = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): _lowerCamelCase : int = self.img[j][i] if num != self.last_list[num]: _lowerCamelCase : Optional[int] = self.last_list[num] cva.imwrite('''output_data/output.jpg''' , self.img ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" cva.imshow('''Output-Image''' , self.img ) cva.imshow('''Input-Image''' , self.original_image ) cva.waitKey(5_0_0_0 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCAmelCase__ = 
os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') lowerCAmelCase__ = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
83
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union a_ = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class A_: """simple docstring""" a_ : str a_ : Optional[str] = None a_ : Optional[Union[str, int]] = None a_ : Optional[Union[str, int]] = None a_ : Optional[Union[str, int]] = None def _lowerCAmelCase ( self ): _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = _str_to_version_tuple(self.version_str ) def __repr__( self ): return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}" @property def _lowerCAmelCase ( self ): return self.major, self.minor, self.patch def _lowerCAmelCase ( self , A ): if isinstance(A , A ): return Version(A ) elif isinstance(A , A ): return other raise TypeError(F"{other} (type {type(A )}) cannot be compared to version." ) def __eq__( self , A ): try: _lowerCamelCase : Dict = self._validate_operand(A ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , A ): _lowerCamelCase : Any = self._validate_operand(A ) return self.tuple < other.tuple def __hash__( self ): return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _lowerCAmelCase ( cls , A ): _lowerCamelCase : Optional[Any] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _lowerCAmelCase ( self ): return self.version_str def UpperCAmelCase_ ( __a : Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = _VERSION_REG.match(__a ) if not res: raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." 
) return tuple(int(__a ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def UpperCAmelCase_ ( __a : List[str] ): '''simple docstring''' return ".".join(str(__a ) for v in version_tuple )
437
0
"""The Audio feature: encodes audio to {bytes, path} storage and decodes it back."""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


# Presumably one-shot warning flags for the decoding backends — the obfuscated
# source only showed three booleans bound to one mangled name; TODO confirm names.
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    """Audio feature.

    Stored as a struct ``{"bytes": binary, "path": string}``; decoding yields
    ``{"path", "array", "sampling_rate"}``.

    NOTE(review): the original discarded every intermediate (buffer, array,
    sampling rate, storage) into throw-away names and used the undefined dtypes
    ``np.intaa``/``np.floataa`` — restored to working code with the same
    structure (int16 / float32 per the 32767 PCM scaling alongside them).
    """

    sampling_rate: Optional[int] = None  # resample target; None keeps native rate
    mono: bool = True                    # downmix multi-channel audio
    decode: bool = True                  # if False, yield raw {bytes, path}
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode a path, raw bytes, or an {array, sampling_rate} dict into storage."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        """Decode stored bytes/path into {"path", "array", "sampling_rate"}."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            # Remote path: resolve a per-repo auth token (if any) before opening.
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, raise, otherwise flatten to bytes/path columns."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast string/binary/struct arrow storage into the {bytes, path} struct."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            # Raw {array, sampling_rate} structs: re-encode each row to wav bytes.
            storage = pa.array(
                [Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()]
            )
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Read path-only rows from disk so every row carries its bytes."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
703
"""CANINE model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the original bound both the logger and this map to the same
# mangled name (the map silently overwrote the logger) and inherited from the
# undefined ``__lowercase`` — restored canonical names.
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    """Configuration for a CANINE model (character-level transformer).

    Defaults mirror the obfuscated source: hidden_size=768, 12 layers/heads,
    character positions up to 16384, hash-embedding and local-attention
    hyperparameters for the character encoder.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
513
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration for wrapping a backbone from the `timm` library.

    NOTE(review): the original inherited from the undefined ``_UpperCamelCase``
    and forwarded the unbound ``lowerCAmelCase_`` to ``super().__init__`` while
    its parameters had different mangled names — restored working code.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # This config always routes through timm.
        self.use_timm_backbone = True
        # Default to the last feature map only.
        self.out_indices = out_indices if out_indices is not None else (-1,)
568
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase ( self : Union[str, Any] ) -> List[str]: __lowerCAmelCase = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) ) def lowercase ( self : str ) -> Any: __lowerCAmelCase = [ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) ) def lowercase ( self : Tuple ) -> Optional[int]: __lowerCAmelCase = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ ) ) def lowercase ( self : List[Any] ) -> List[str]: __lowerCAmelCase = [ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) ) def lowercase ( self : List[Any] ) -> int: __lowerCAmelCase = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', # Removed: 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ ) ) def lowercase ( self : str ) -> str: __lowerCAmelCase = [ 'safety_checker/pytorch_model.fp16.bin', 
'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] __lowerCAmelCase = 'fp16' self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def lowercase ( self : str ) -> List[Any]: __lowerCAmelCase = [ 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] __lowerCAmelCase = 'fp16' self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def lowercase ( self : str ) -> List[str]: # pass variant but use the non-variant filenames __lowerCAmelCase = [ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] __lowerCAmelCase = 'fp16' self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def lowercase ( self : str ) -> Union[str, Any]: __lowerCAmelCase = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] __lowerCAmelCase = 'fp16' self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def lowercase ( self : str ) -> List[Any]: __lowerCAmelCase = [ 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', ] __lowerCAmelCase = 'fp16' self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def lowercase ( self : List[str] ) -> List[Any]: # pass variant but use the non-variant filenames __lowerCAmelCase = [ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] __lowerCAmelCase = 
'fp16' self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def lowercase ( self : Optional[Any] ) -> Optional[Any]: __lowerCAmelCase = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', # 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] __lowerCAmelCase = 'fp16' self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
53
0
'''simple docstring''' def _a ( __lowerCAmelCase : int ): """simple docstring""" return str(__lowerCAmelCase ) == str(__lowerCAmelCase )[::-1] def _a ( __lowerCAmelCase : int ): """simple docstring""" return int(__lowerCAmelCase ) + int(str(__lowerCAmelCase )[::-1] ) def _a ( __lowerCAmelCase : int = 1_00_00 ): """simple docstring""" snake_case__ : Union[str, Any] = [] for num in range(1 , __lowerCAmelCase ): snake_case__ : Optional[int] = 0 snake_case__ : Dict = num while iterations < 50: snake_case__ : List[Any] = sum_reverse(__lowerCAmelCase ) iterations += 1 if is_palindrome(__lowerCAmelCase ): break else: lychrel_nums.append(__lowerCAmelCase ) return len(__lowerCAmelCase ) if __name__ == "__main__": print(f"""{solution() = }""")
502
'''simple docstring''' import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def _a ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ): """simple docstring""" snake_case__ : Any = BigBirdConfig.from_json_file(__lowerCAmelCase ) print(F"""Building PyTorch model from configuration: {config}""" ) if is_trivia_qa: snake_case__ : str = BigBirdForQuestionAnswering(__lowerCAmelCase ) else: snake_case__ : Tuple = BigBirdForPreTraining(__lowerCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(__lowerCAmelCase , __lowerCAmelCase , is_trivia_qa=__lowerCAmelCase ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--big_bird_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head.""" ) lowerCAmelCase__ : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
502
1
def lowerCamelCase_ ( UpperCAmelCase__ = 100 ): """simple docstring""" a_ = n * (n + 1) * (2 * n + 1) / 6 a_ = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'''{solution() = }''')
483
"""simple docstring""" def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return " ".join( ''.join(word[::-1] ) if len(_snake_case ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('''Hey wollef sroirraw'''))
7
0
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _snake_case : Union[str, Any] = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE__ =['''pixel_values'''] def __init__( self, _a = True, _a = None, _a = PILImageResampling.BILINEAR, _a = True, _a = None, _a = True, _a = 1 / 2_55, _a = True, _a = None, _a = None, **_a, ) -> None: super().__init__(**A_ ) __SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_56} __SCREAMING_SNAKE_CASE = get_size_dict(A_, default_to_square=A_ ) __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} __SCREAMING_SNAKE_CASE = get_size_dict(A_, param_name="crop_size" ) __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = resample __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self, _a, _a, _a = PILImageResampling.BICUBIC, _a = None, **_a, ) -> np.ndarray: __SCREAMING_SNAKE_CASE = get_size_dict(A_, default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key 
`shortest_edge`. Got {size.keys()}''' ) __SCREAMING_SNAKE_CASE = get_resize_output_image_size(A_, size=size["shortest_edge"], default_to_square=A_ ) return resize(A_, size=A_, resample=A_, data_format=A_, **A_ ) def __lowerCAmelCase ( self, _a, _a, _a = None, **_a, ) -> np.ndarray: __SCREAMING_SNAKE_CASE = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' ) return center_crop(A_, size=(size["height"], size["width"]), data_format=A_, **A_ ) def __lowerCAmelCase ( self, _a, _a, _a = None, **_a ) -> np.ndarray: return rescale(A_, scale=A_, data_format=A_, **A_ ) def __lowerCAmelCase ( self, _a, _a, _a, _a = None, **_a, ) -> np.ndarray: return normalize(A_, mean=A_, std=A_, data_format=A_, **A_ ) def __lowerCAmelCase ( self, _a, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = None, _a = ChannelDimension.FIRST, **_a, ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE = size if size is not None else self.size __SCREAMING_SNAKE_CASE = get_size_dict(A_, default_to_square=A_ ) __SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size __SCREAMING_SNAKE_CASE = get_size_dict(A_, param_name="crop_size" ) __SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std 
__SCREAMING_SNAKE_CASE = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. __SCREAMING_SNAKE_CASE = [to_numpy_array(A_ ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE = [self.resize(image=A_, size=A_, resample=A_ ) for image in images] if do_center_crop: __SCREAMING_SNAKE_CASE = [self.center_crop(image=A_, size=A_ ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE = [self.rescale(image=A_, scale=A_ ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE = [self.normalize(image=A_, mean=A_, std=A_ ) for image in images] __SCREAMING_SNAKE_CASE = [to_channel_dimension_format(A_, A_ ) for image in images] __SCREAMING_SNAKE_CASE = {"pixel_values": images} return BatchFeature(data=A_, tensor_type=A_ ) def __lowerCAmelCase ( self, _a, _a = None ) -> Any: __SCREAMING_SNAKE_CASE = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(A_ ): __SCREAMING_SNAKE_CASE = target_sizes.numpy() __SCREAMING_SNAKE_CASE = [] for idx in range(len(A_ ) ): __SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="bilinear", align_corners=A_ ) __SCREAMING_SNAKE_CASE 
= resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: __SCREAMING_SNAKE_CASE = logits.argmax(dim=1 ) __SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
705
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class __SCREAMING_SNAKE_CASE : SCREAMING_SNAKE_CASE__ =LEDConfig SCREAMING_SNAKE_CASE__ ={} SCREAMING_SNAKE_CASE__ ="""gelu""" def __init__( self, _a, _a=13, _a=7, _a=True, _a=False, _a=99, _a=32, _a=2, _a=4, _a=37, _a=0.1, _a=0.1, _a=20, _a=2, _a=1, _a=0, _a=4, ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after __SCREAMING_SNAKE_CASE = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests __SCREAMING_SNAKE_CASE = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor], axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, ) __SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(_a, _a, _a ) __SCREAMING_SNAKE_CASE = tf.concat( [tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]], axis=-1, ) __SCREAMING_SNAKE_CASE = global_attention_mask return config, inputs_dict def __lowerCAmelCase ( self, _a, _a ) -> List[str]: __SCREAMING_SNAKE_CASE = TFLEDModel(config=_a ).get_decoder() __SCREAMING_SNAKE_CASE = inputs_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"][:1, :] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, use_cache=_a ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # 
create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), config.vocab_size ) __SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and __SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens], axis=-1 ) __SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask], axis=-1 ) __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a )[0] __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, past_key_values=_a )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice __SCREAMING_SNAKE_CASE = int(ids_tensor((1,), output_from_past.shape[-1] ) ) __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx] __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_a, _a, rtol=1E-3 ) def _A ( __snake_case :Any , __snake_case :Dict , __snake_case :List[Any] , __snake_case :List[Any]=None , __snake_case :Optional[Any]=None , __snake_case :Any=None , __snake_case :List[str]=None , ) -> List[str]: """simple docstring""" if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf 
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE__ =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () SCREAMING_SNAKE_CASE__ =(TFLEDForConditionalGeneration,) if is_tf_available() else () SCREAMING_SNAKE_CASE__ =( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =False def __lowerCAmelCase ( self ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = TFLEDModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self, config_class=_a ) def __lowerCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = tf.zeros_like(inputs_dict["attention_mask"] ) __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.model_tester.seq_length __SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length def check_decoder_attentions_output(_a ): __SCREAMING_SNAKE_CASE = outputs.decoder_attentions self.assertEqual(len(_a ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], ) def 
check_encoder_attentions_output(_a ): __SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_attentions] __SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(_a ), self.model_tester.num_hidden_layers ) self.assertEqual(len(_a ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], ) self.assertListEqual( list(global_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = model_class(_a ) __SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) ) __SCREAMING_SNAKE_CASE = len(_a ) self.assertEqual(config.output_hidden_states, _a ) check_encoder_attentions_output(_a ) if self.is_encoder_decoder: __SCREAMING_SNAKE_CASE = model_class(_a ) __SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) ) self.assertEqual(config.output_hidden_states, _a ) check_decoder_attentions_output(_a ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(_a ) __SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) ) self.assertEqual(config.output_hidden_states, _a ) check_encoder_attentions_output(_a ) # Check attention is always last and order is fine __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(_a ) __SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(_a ) ) self.assertEqual(model.config.output_hidden_states, _a ) check_encoder_attentions_output(_a ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." 
) def __lowerCAmelCase ( self ) -> Tuple: pass def __lowerCAmelCase ( self ) -> Optional[int]: # TODO: Head-masking not yet implement pass def _A ( __snake_case :Optional[int] ) -> List[Any]: """simple docstring""" return tf.constant(__snake_case , dtype=tf.intaa ) _snake_case : int = 1e-4 @slow @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[Any]: __SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here __SCREAMING_SNAKE_CASE = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) __SCREAMING_SNAKE_CASE = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) __SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config, _a, _a ) __SCREAMING_SNAKE_CASE = model(**_a )[0] __SCREAMING_SNAKE_CASE = (1, 10_24, 7_68) self.assertEqual(output.shape, _a ) # change to expected output here __SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], ) tf.debugging.assert_near(output[:, :3, :3], _a, atol=1E-3 ) def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here __SCREAMING_SNAKE_CASE = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) __SCREAMING_SNAKE_CASE = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) __SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config, _a, _a ) __SCREAMING_SNAKE_CASE = model(**_a )[0] __SCREAMING_SNAKE_CASE = (1, 10_24, model.config.vocab_size) self.assertEqual(output.shape, _a ) # change to expected output here __SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], ) tf.debugging.assert_near(output[:, :3, :3], _a, atol=1E-3, rtol=1E-3 )
214
0
from math import factorial def __snake_case ( _lowerCAmelCase : int = 100 ) -> int: return sum(map(_lowerCAmelCase , str(factorial(_lowerCAmelCase ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
454
from timeit import timeit _lowerCAmelCase : Tuple = { '''MALAYALAM''': True, '''String''': False, '''rotor''': True, '''level''': True, '''A''': True, '''BB''': True, '''ABC''': False, '''amanaplanacanalpanama''': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def __snake_case ( _lowerCAmelCase : str ) -> bool: A_ : List[str] = 0 A_ : str = len(_lowerCAmelCase ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def __snake_case ( _lowerCAmelCase : str ) -> bool: A_ : int = len(_lowerCAmelCase ) // 2 A_ : Union[str, Any] = len(_lowerCAmelCase ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(_lowerCAmelCase ) ) def __snake_case ( _lowerCAmelCase : str ) -> bool: if len(_lowerCAmelCase ) <= 2: return True if s[0] == s[len(_lowerCAmelCase ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def __snake_case ( _lowerCAmelCase : str ) -> bool: return s == s[::-1] def __snake_case ( _lowerCAmelCase : str ) -> None: A_ : int = f"all({name}(key) is value for key, value in test_data.items())" A_ : List[str] = f"from __main__ import test_data, {name}" A_ : str = 500000 A_ : List[str] = timeit(stmt=_lowerCAmelCase , setup=_lowerCAmelCase , number=_lowerCAmelCase ) print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds" ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(F'''{key:21} {value}''') print('''a man a plan a canal panama''') # finished 500,000 runs in 0.46793 seconds benchmark_function('''is_palindrome_slice''') # 
finished 500,000 runs in 0.85234 seconds benchmark_function('''is_palindrome''') # finished 500,000 runs in 1.32028 seconds benchmark_function('''is_palindrome_recursive''') # finished 500,000 runs in 2.08679 seconds benchmark_function('''is_palindrome_traversal''')
454
1
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(__lowercase ) class snake_case ( __lowercase ): def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) requires_backends(self , '''vision''' ) self.check_model_type(SCREAMING_SNAKE_CASE_ ) def __call__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): """simple docstring""" return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase (self , **SCREAMING_SNAKE_CASE_ ): """simple docstring""" return {}, {}, {} def _lowercase (self , SCREAMING_SNAKE_CASE_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_image(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = image.size SCREAMING_SNAKE_CASE_ = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) return model_inputs def _lowercase (self , SCREAMING_SNAKE_CASE_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.model(**SCREAMING_SNAKE_CASE_ ) return model_outputs def _lowercase (self , SCREAMING_SNAKE_CASE_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = model_outputs.predicted_depth SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = prediction.squeeze().cpu().numpy() SCREAMING_SNAKE_CASE_ = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE_ )).astype('''uint8''' ) SCREAMING_SNAKE_CASE_ = 
Image.fromarray(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = predicted_depth SCREAMING_SNAKE_CASE_ = depth return output_dict
628
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class snake_case ( __lowercase ): def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ = 8 # DPR tok SCREAMING_SNAKE_CASE_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok SCREAMING_SNAKE_CASE_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', 
'''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def _lowercase (self ): """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def _lowercase (self ): """simple docstring""" return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def _lowercase (self ): """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def _lowercase (self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _lowercase (self ): """simple docstring""" 
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _lowercase (self , SCREAMING_SNAKE_CASE_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset SCREAMING_SNAKE_CASE_ = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: SCREAMING_SNAKE_CASE_ = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , ) return retriever def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , 
string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) ) SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) SCREAMING_SNAKE_CASE_ = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever() SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] 
) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dirname: 
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) 
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever() SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _lowercase (self ): """simple docstring""" import torch SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever() SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]] SCREAMING_SNAKE_CASE_ = np.array( 
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) SCREAMING_SNAKE_CASE_ = retriever( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer() SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]] SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual( len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc token related keys in dictionary.
628
1
"""simple docstring""" from __future__ import annotations a = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0] a = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1] def lowercase (snake_case__ : list[float] ) -> List[str]: '''simple docstring''' lowerCAmelCase = [] lowerCAmelCase = len(__A ) for i in range(__A ): lowerCAmelCase = -1 for j in range(i + 1 , __A ): if arr[i] < arr[j]: lowerCAmelCase = arr[j] break result.append(__A ) return result def lowercase (snake_case__ : list[float] ) -> str: '''simple docstring''' lowerCAmelCase = [] for i, outer in enumerate(__A ): lowerCAmelCase = -1 for inner in arr[i + 1 :]: if outer < inner: lowerCAmelCase = inner break result.append(__A ) return result def lowercase (snake_case__ : list[float] ) -> str: '''simple docstring''' lowerCAmelCase = len(__A ) lowerCAmelCase = [] lowerCAmelCase = [-1] * arr_size for index in reversed(range(__A ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: lowerCAmelCase = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) a = ( 'from __main__ import arr, next_greatest_element_slow, ' 'next_greatest_element_fast, next_greatest_element' ) print( 'next_greatest_element_slow():', timeit('next_greatest_element_slow(arr)', setup=setup), ) print( 'next_greatest_element_fast():', timeit('next_greatest_element_fast(arr)', setup=setup), ) print( ' next_greatest_element():', timeit('next_greatest_element(arr)', setup=setup), )
169
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ConsistencyModelPipeline __UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __UpperCamelCase = frozenset( [ "num_inference_steps", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) @property def _UpperCamelCase ( self ): '''simple docstring''' snake_case: Optional[Any] = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet' , ) return unet @property def _UpperCamelCase ( self ): '''simple docstring''' snake_case: Optional[int] = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , ) return unet def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=False ): '''simple docstring''' if class_cond: snake_case: Optional[int] = self.dummy_cond_unet else: snake_case: List[str] = self.dummy_uncond_unet # Default to CM multistep sampler snake_case: Dict = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case: int = { 'unet': unet, 'scheduler': scheduler, } return components def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ): '''simple docstring''' if 
str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): snake_case: Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: snake_case: Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) snake_case: Tuple = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [22, 0], 'generator': generator, 'output_type': 'np', } return inputs def _UpperCamelCase ( self ): '''simple docstring''' snake_case: Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case: Any = self.get_dummy_components() snake_case: List[str] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ ) snake_case: Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) snake_case: Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) snake_case: Tuple = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 32, 32, 3) snake_case: List[Any] = image[0, -3:, -3:, -1] snake_case: List[Any] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self ): '''simple docstring''' snake_case: List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case: Optional[Any] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ ) snake_case: Tuple = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ ) snake_case: List[str] = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) snake_case: Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) snake_case: str = 0 snake_case: List[Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 32, 32, 3) snake_case: Dict = image[0, -3:, -3:, -1] snake_case: int = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1E-3 def _UpperCamelCase ( self ): '''simple docstring''' snake_case: int = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case: Optional[Any] = self.get_dummy_components() snake_case: Optional[int] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ ) snake_case: Any = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) snake_case: List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) snake_case: str = 1 snake_case: Dict = None snake_case: int = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 32, 32, 3) snake_case: Dict = image[0, -3:, -3:, -1] snake_case: Tuple = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self ): '''simple docstring''' snake_case: Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case: Dict = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ ) snake_case: List[str] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ ) snake_case: Any = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) snake_case: Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) snake_case: Dict = 1 snake_case: List[str] = None snake_case: Optional[Any] = 0 snake_case: Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 32, 32, 3) snake_case: str = image[0, -3:, -3:, -1] snake_case: str = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self , 
SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="cpu" , SCREAMING_SNAKE_CASE__=torch.floataa , SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ): '''simple docstring''' snake_case: Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) snake_case: Union[str, Any] = { 'num_inference_steps': None, 'timesteps': [22, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: snake_case: str = self.get_fixed_latents(seed=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , shape=SCREAMING_SNAKE_CASE__ ) snake_case: Optional[int] = latents return inputs def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="cpu" , SCREAMING_SNAKE_CASE__=torch.floataa , SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ): '''simple docstring''' if type(SCREAMING_SNAKE_CASE__ ) == str: snake_case: Dict = torch.device(SCREAMING_SNAKE_CASE__ ) snake_case: Dict = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) snake_case: Union[str, Any] = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ ) return latents def _UpperCamelCase ( self ): '''simple docstring''' snake_case: Any = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) snake_case: str = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case: Union[str, Any] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) snake_case: Any = self.get_inputs() snake_case: List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 64, 64, 3) snake_case: Optional[int] = image[0, -3:, -3:, -1] snake_case: List[Any] = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 
0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _UpperCamelCase ( self ): '''simple docstring''' snake_case: str = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) snake_case: Any = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case: Any = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) snake_case: List[str] = self.get_inputs() snake_case: List[Any] = 1 snake_case: Union[str, Any] = None snake_case: str = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 64, 64, 3) snake_case: Dict = image[0, -3:, -3:, -1] snake_case: int = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def _UpperCamelCase ( self ): '''simple docstring''' snake_case: List[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) snake_case: Optional[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case: Tuple = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) pipe.to(torch_device=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) snake_case: str = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ , enable_math=SCREAMING_SNAKE_CASE__ , enable_mem_efficient=SCREAMING_SNAKE_CASE__ ): snake_case: Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 64, 64, 3) 
snake_case: Optional[Any] = image[0, -3:, -3:, -1] snake_case: Optional[Any] = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def _UpperCamelCase ( self ): '''simple docstring''' snake_case: str = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) snake_case: Optional[int] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case: Tuple = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) pipe.to(torch_device=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) snake_case: Optional[int] = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ) snake_case: int = 1 snake_case: Optional[Any] = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ , enable_math=SCREAMING_SNAKE_CASE__ , enable_mem_efficient=SCREAMING_SNAKE_CASE__ ): snake_case: Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 64, 64, 3) snake_case: Tuple = image[0, -3:, -3:, -1] snake_case: Union[str, Any] = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
329
0
from typing import Any def __SCREAMING_SNAKE_CASE ( UpperCamelCase : list ) -> list[Any]: """simple docstring""" if not input_list: return [] a_ = [input_list.count(UpperCamelCase ) for value in input_list] a_ = max(UpperCamelCase ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(UpperCamelCase ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
703
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase_ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=1000 , _SCREAMING_SNAKE_CASE=[3, 3, 6, 4] , _SCREAMING_SNAKE_CASE=[48, 56, 112, 220] , ): a_ = parent a_ = batch_size a_ = num_channels a_ = is_training a_ = use_labels a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = num_labels a_ = image_size a_ = layer_depths a_ = embed_dims def __magic_name__ ( self ): a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.num_labels ) a_ = self.get_config() return config, pixel_values, labels def __magic_name__ ( self ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_SCREAMING_SNAKE_CASE , 
layer_scale_init_value=1E-5 , ) def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): a_ = SwiftFormerModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() a_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): a_ = self.num_labels a_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() a_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) a_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__ ( self ): ((a_) , (a_) , (a_)) = self.prepare_config_and_inputs() a_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowerCamelCase : List[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () _lowerCamelCase : Optional[Any] = ( {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification} if is_torch_available() else {} ) _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Dict = False _lowerCamelCase : Any = False _lowerCamelCase : Tuple = False _lowerCamelCase : List[Any] = False def __magic_name__ ( self ): a_ = SwiftFormerModelTester(self ) a_ = ConfigTester( self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 , 
num_attention_heads=12 , num_hidden_layers=12 , ) def __magic_name__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def __magic_name__ ( self ): pass def __magic_name__ ( self ): a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(_SCREAMING_SNAKE_CASE ) a_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def __magic_name__ ( self ): a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(_SCREAMING_SNAKE_CASE ) a_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def __magic_name__ ( self ): a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def __magic_name__ ( self ): a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def __magic_name__ ( self ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = SwiftFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def __magic_name__ ( self ): pass def __magic_name__ ( self ): def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): a_ = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): a_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) a_ = outputs.hidden_states a_ = 8 self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # TODO # 
SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(_SCREAMING_SNAKE_CASE ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __magic_name__ ( self ): def _config_zero_init(_SCREAMING_SNAKE_CASE ): a_ = copy.deepcopy(_SCREAMING_SNAKE_CASE ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1E-10 ) if isinstance(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ): a_ = _config_zero_init(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return configs_no_init a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = _config_zero_init(_SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: a_ = model_class(config=_SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of 
the model used for common tests.""" ) def __magic_name__ ( self ): pass def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" a_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def __magic_name__ ( self ): return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def __magic_name__ ( self ): a_ = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_SCREAMING_SNAKE_CASE ) a_ = self.default_image_processor a_ = prepare_img() a_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): a_ = model(**_SCREAMING_SNAKE_CASE ) # verify the logits a_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) a_ = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
403
0
import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class A ( unittest.TestCase ): def lowerCamelCase ( self : List[Any] ) -> str: """simple docstring""" _lowerCamelCase : Any ='laion/clap-htsat-unfused' _lowerCamelCase : str =tempfile.mkdtemp() def lowerCamelCase ( self : Union[str, Any] , **lowercase_ : List[Any] ) -> Tuple: """simple docstring""" return RobertaTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def lowerCamelCase ( self : List[str] , **lowercase_ : Union[str, Any] ) -> Optional[int]: """simple docstring""" return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_ ) def lowerCamelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : str =self.get_tokenizer() _lowerCamelCase : Optional[Any] =self.get_feature_extractor() _lowerCamelCase : Tuple =ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : int =ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowercase_ ) def lowerCamelCase ( self : Dict ) -> int: """simple docstring""" _lowerCamelCase : Optional[Any] =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : Dict =self.get_tokenizer(bos_token='(BOS)' , 
eos_token='(EOS)' ) _lowerCamelCase : List[str] =self.get_feature_extractor(do_normalize=lowercase_ , padding_value=1.0 ) _lowerCamelCase : Optional[int] =ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowercase_ ) def lowerCamelCase ( self : int ) -> Optional[int]: """simple docstring""" _lowerCamelCase : List[str] =self.get_feature_extractor() _lowerCamelCase : List[Any] =self.get_tokenizer() _lowerCamelCase : str =ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) _lowerCamelCase : Optional[int] =floats_list((3, 1000) ) _lowerCamelCase : Union[str, Any] =feature_extractor(lowercase_ , return_tensors='np' ) _lowerCamelCase : List[str] =processor(audios=lowercase_ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase ( self : Tuple ) -> Dict: """simple docstring""" _lowerCamelCase : Any =self.get_feature_extractor() _lowerCamelCase : Any =self.get_tokenizer() _lowerCamelCase : int =ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) _lowerCamelCase : int ='This is a test string' _lowerCamelCase : Optional[int] =processor(text=lowercase_ ) _lowerCamelCase : str =tokenizer(lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase ( self : Tuple ) -> Any: """simple docstring""" _lowerCamelCase : List[str] =self.get_feature_extractor() _lowerCamelCase : int =self.get_tokenizer() _lowerCamelCase : int =ClapProcessor(tokenizer=lowercase_ , 
feature_extractor=lowercase_ ) _lowerCamelCase : Dict =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Dict =processor.batch_decode(lowercase_ ) _lowerCamelCase : Optional[Any] =tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def lowerCamelCase ( self : Tuple ) -> Tuple: """simple docstring""" _lowerCamelCase : Tuple =self.get_feature_extractor() _lowerCamelCase : Tuple =self.get_tokenizer() _lowerCamelCase : Tuple =ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
464
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { 'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json', } class A ( UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase__ : str ='focalnet' def __init__( self : Optional[int] , lowercase_ : List[Any]=224 , lowercase_ : str=4 , lowercase_ : Any=3 , lowercase_ : Dict=96 , lowercase_ : List[str]=False , lowercase_ : Any=[192, 384, 768, 768] , lowercase_ : Dict=[2, 2, 6, 2] , lowercase_ : Optional[int]=[2, 2, 2, 2] , lowercase_ : List[Any]=[3, 3, 3, 3] , lowercase_ : Any="gelu" , lowercase_ : str=4.0 , lowercase_ : List[Any]=0.0 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[Any]=False , lowercase_ : Optional[int]=1E-4 , lowercase_ : Dict=False , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False , lowercase_ : List[Any]=0.02 , lowercase_ : Any=1E-5 , lowercase_ : Any=32 , lowercase_ : List[str]=None , lowercase_ : int=None , **lowercase_ : Optional[Any] , ) -> Optional[Any]: """simple docstring""" super().__init__(**lowercase_ ) _lowerCamelCase : Union[str, Any] =image_size _lowerCamelCase : str =patch_size _lowerCamelCase : str =num_channels _lowerCamelCase : Tuple =embed_dim _lowerCamelCase : Dict =use_conv_embed _lowerCamelCase : int =hidden_sizes _lowerCamelCase : str =depths _lowerCamelCase : int =focal_levels _lowerCamelCase : List[Any] =focal_windows _lowerCamelCase : List[Any] =hidden_act _lowerCamelCase : int =mlp_ratio _lowerCamelCase : Any =hidden_dropout_prob _lowerCamelCase : Optional[Any] =drop_path_rate _lowerCamelCase : int =use_layerscale _lowerCamelCase : str =layerscale_value _lowerCamelCase : Dict =use_post_layernorm _lowerCamelCase : str =use_post_layernorm_in_modulation _lowerCamelCase : List[Any] =normalize_modulator _lowerCamelCase : Optional[Any] 
=initializer_range _lowerCamelCase : List[str] =layer_norm_eps _lowerCamelCase : Optional[int] =encoder_stride _lowerCamelCase : List[str] =['stem'] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] _lowerCamelCase , _lowerCamelCase : Union[str, Any] =get_aligned_output_features_output_indices( out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
464
1
'''simple docstring''' from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class _UpperCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : str , lowerCAmelCase__ : int = 1_6 , lowerCAmelCase__ : int = 8_8 , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = "geglu" , lowerCAmelCase__ : Optional[int] = None , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , in_channels=lowerCAmelCase__ , num_layers=lowerCAmelCase__ , dropout=lowerCAmelCase__ , norm_num_groups=lowerCAmelCase__ , cross_attention_dim=lowerCAmelCase__ , attention_bias=lowerCAmelCase__ , sample_size=lowerCAmelCase__ , num_vector_embeds=lowerCAmelCase__ , activation_fn=lowerCAmelCase__ , num_embeds_ada_norm=lowerCAmelCase__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference __SCREAMING_SNAKE_CASE : Union[str, Any] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` __SCREAMING_SNAKE_CASE : int = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` __SCREAMING_SNAKE_CASE : Tuple = [1, 0] def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states __SCREAMING_SNAKE_CASE : List[Any] = [] __SCREAMING_SNAKE_CASE : str = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens __SCREAMING_SNAKE_CASE : Tuple = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] __SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer_index_for_condition[i] __SCREAMING_SNAKE_CASE : Optional[int] = self.transformers[transformer_index]( lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , timestep=lowerCAmelCase__ , cross_attention_kwargs=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] __SCREAMING_SNAKE_CASE : str = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) __SCREAMING_SNAKE_CASE : List[str] = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=lowerCAmelCase__ )
706
'''simple docstring''' from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch("""socket.socket""" ) @patch("""builtins.open""" ) def lowerCAmelCase_ ( _lowerCamelCase: Dict , _lowerCamelCase: List[Any] ): # ===== initialization ===== __SCREAMING_SNAKE_CASE : Optional[int] = Mock() __SCREAMING_SNAKE_CASE : Optional[int] = conn, Mock() __SCREAMING_SNAKE_CASE : str = iter([1, None] ) __SCREAMING_SNAKE_CASE : Any = lambda _lowerCamelCase : next(_lowerCamelCase ) # ===== invoke ===== send_file(filename="""mytext.txt""" , testing=_lowerCamelCase ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
178
0
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase : """simple docstring""" def __init__( self : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=1_3 , _lowerCAmelCase : Any=3_2 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : Dict=[1_0, 2_0, 3_0, 4_0] , _lowerCAmelCase : Optional[int]=[2, 2, 3, 2] , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=3_7 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : List[Any]=1_0 , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : Any=["stage2", "stage3", "stage4"] , _lowerCAmelCase : Union[str, Any]=[2, 3, 4] , _lowerCAmelCase : Optional[int]=None , ) -> Optional[int]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = num_channels snake_case_ = num_stages snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = is_training snake_case_ = use_labels snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = num_labels snake_case_ = initializer_range snake_case_ = out_features snake_case_ = out_indices snake_case_ = scope def lowerCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" snake_case_ = 
floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCAmelCase__ ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : int ) -> Dict: """simple docstring""" snake_case_ = ConvNextModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() snake_case_ = model(_lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowerCAmelCase__ ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" snake_case_ = ConvNextForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() snake_case_ = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> List[Any]: """simple docstring""" snake_case_ = ConvNextBackbone(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() snake_case_ = model(_lowerCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) 
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None snake_case_ = None snake_case_ = ConvNextBackbone(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() snake_case_ = model(_lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCAmelCase__ ( self : Tuple ) -> int: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def lowerCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" snake_case_ = ConvNextModelTester(self ) snake_case_ = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=3_7 ) def lowerCAmelCase__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" self.create_and_test_config_common_properties() 
self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase__ ( self : int ) -> Optional[Any]: """simple docstring""" return @unittest.skip(reason="ConvNext does not use inputs_embeds" ) def lowerCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" pass @unittest.skip(reason="ConvNext does not support input and output embeddings" ) def lowerCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="ConvNext does not use feedforward chunking" ) def lowerCAmelCase__ ( self : Dict ) -> int: """simple docstring""" pass def lowerCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(_lowerCAmelCase ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def lowerCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" def check_hidden_states_output(_lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : int ): 
snake_case_ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case_ = self.model_tester.num_stages self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def lowerCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def lowerCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = ConvNextModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def _lowerCAmelCase ( )->List[Any]: '''simple docstring''' snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None @slow def lowerCAmelCase__ ( self : Optional[int] ) -> 
Optional[Any]: """simple docstring""" snake_case_ = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(_lowerCAmelCase ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): snake_case_ = model(**_lowerCAmelCase ) # verify the logits snake_case_ = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) snake_case_ = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) ) @require_torch class __lowerCAmelCase ( unittest.TestCase , a ): """simple docstring""" _SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ConvNextConfig _SCREAMING_SNAKE_CASE = False def lowerCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" snake_case_ = ConvNextModelTester(self )
283
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """
    Polynomial warmup on top of another learning-rate schedule.

    While `step < warmup_steps` the learning rate is
    `initial_learning_rate * (step / warmup_steps) ** power`; afterwards the
    wrapped `decay_schedule_fn` takes over (fed the step offset by `warmup_steps`).

    Args:
        initial_learning_rate: Learning rate reached at the end of warmup.
        decay_schedule_fn: Schedule applied once warmup is over.
        warmup_steps: Number of warmup steps.
        power: Exponent of the polynomial warmup (1.0 = linear).
        name: Optional name scope for the ops created by the schedule.
    """

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup: if global_step < warmup_steps, the
            # learning rate is `(global_step / warmup_steps) ** power * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """
    Creates an optimizer with a warmup phase followed by a polynomial decay.

    Returns:
        `(optimizer, lr_schedule)` — the schedule is returned separately so the
        learning rate can be tracked independently of the optimizer.
    """
    # Overall schedule: polynomial decay from `init_lr` down to `init_lr * min_lr_ratio`
    # over the steps that remain after warmup.
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """
    Adam with decoupled weight decay: the decay is applied directly to the
    parameters (skipping names matched by `exclude_from_weight_decay`), not
    added to the loss as L2 regularization.

    Args:
        learning_rate: Learning rate or schedule to use.
        beta_1 / beta_2: Adam exponential decay rates for the moment estimates.
        epsilon: Numerical-stability constant.
        amsgrad: Whether to apply the AMSGrad variant.
        weight_decay_rate: Decoupled weight decay to apply.
        include_in_weight_decay: Regexes of parameter names to always decay
            (takes precedence over the excludes).
        exclude_from_weight_decay: Regexes of parameter names to skip.
        name: Name prefix for the ops created when applying gradients.
    """

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config, re-registering the WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        # Apply decoupled decay only to variables whose name passes the filter.
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state, caching per-(device, dtype) coefficients."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply decoupled weight decay to the parameter named `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """
    Accumulates gradients across steps (e.g. to emulate larger batches).

    Call the instance with a list of gradients to add them into the running
    sums; read `.gradients` to get the accumulated values and `.reset()` to
    start a new accumulation window.
    """

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (creates the counter variable lazily)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Adds `gradients` into the accumulators, creating them on first use."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the step counter and the accumulated gradients to zero."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
283
1
import os

import pytest
from attr import dataclass


# Default AWS region for the SageMaker tests.
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    """Shared configuration for one SageMaker training-job test run."""

    # Framework under test ("pytorch" or tensorflow); supplied per test class.
    framework: str
    # Execution role assumed by the SageMaker training job.
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    # Same hyperparameters but with more steps for distributed runs.
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        """CloudWatch metric regexes to scrape from the training logs."""
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            # NOTE(review): the TF regexes pair "eval_accuracy" with a loss pattern
            # and vice versa — looks swapped, but preserved as-is; confirm upstream.
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        # Keeps the historical (misspelled) job-name prefix for continuity.
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        """Path to the framework-specific test scripts."""
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        """Training container image for the selected framework."""
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    """Attach a fresh SageMaker test environment to the requesting test class."""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
712
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """
    Visual question answering pipeline: given an image and a question about it,
    predicts the most likely answers with their scores.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict usage to models registered for visual question answering.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Splits call-time kwargs into preprocess / forward / postprocess params."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """
        Answers `question` about `image`. Also accepts a dict (or list of dicts)
        with "image" and "question" keys as the first argument.
        """
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the caller already bundled image+question (possibly batched).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        # Tokenize the question and extract image features, merged into one dict.
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Converts logits into the top-k `{"score", "answer"}` dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            # Multi-label-style scoring: sigmoid over the logits of the single sample.
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        # NOTE(review): this codebase's config exposes `idalabel`; upstream names it `id2label`.
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(scores, ids)]
83
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase_ : int = 16 UpperCamelCase_ : Any = 32 def A_ (__a , __a = 16 ): '''simple docstring''' A_ = AutoTokenizer.from_pretrained("bert-base-cased" ) A_ = load_dataset("glue" , "mrpc" ) def tokenize_function(__a ): # max_length=None => use the model max length (it's actually the default) A_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__a , max_length=__a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ = datasets.map( __a , batched=__a , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__a ): # On TPU it's best to pad everything to the same length or training will be very slow. 
A_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ = 16 elif accelerator.mixed_precision != "no": A_ = 8 else: A_ = None return tokenizer.pad( __a , padding="longest" , max_length=__a , pad_to_multiple_of=__a , return_tensors="pt" , ) # Instantiate dataloaders. A_ = DataLoader( tokenized_datasets["train"] , shuffle=__a , collate_fn=__a , batch_size=__a ) A_ = DataLoader( tokenized_datasets["validation"] , shuffle=__a , collate_fn=__a , batch_size=__a ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase_ : str = mocked_dataloaders # noqa: F811 def A_ (__a , __a ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , __a ) == "1": A_ = 2 # New Code # A_ = int(args.gradient_accumulation_steps ) # Initialize accelerator A_ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__a ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ = config["lr"] A_ = int(config["num_epochs"] ) A_ = int(config["seed"] ) A_ = int(config["batch_size"] ) A_ = evaluate.load("glue" , "mrpc" ) set_seed(__a ) A_ , A_ = get_dataloaders(__a , __a ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__a ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). 
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ = model.to(accelerator.device ) # Instantiate optimizer A_ = AdamW(params=model.parameters() , lr=__a ) # Instantiate scheduler A_ = get_linear_schedule_with_warmup( optimizer=__a , num_warmup_steps=100 , num_training_steps=(len(__a ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ , A_ , A_ , A_ , A_ = accelerator.prepare( __a , __a , __a , __a , __a ) # Now we train the model for epoch in range(__a ): model.train() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__a ): A_ = model(**__a ) A_ = output.loss accelerator.backward(__a ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A_ = model(**__a ) A_ = outputs.logits.argmax(dim=-1 ) A_ , A_ = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=__a , references=__a , ) A_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , __a ) def A_ (): '''simple docstring''' A_ = argparse.ArgumentParser(description="Simple example of training script." 
) parser.add_argument( "--mixed_precision" , type=__a , default=__a , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=__a , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) A_ = parser.parse_args() A_ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(__a , __a ) if __name__ == "__main__": main()
115
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ : List[str] = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ : Tuple = [ '''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SEWForCTC''', '''SEWForSequenceClassification''', '''SEWModel''', '''SEWPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys UpperCamelCase_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
115
1
"""Tool for answering a question about an image with a ViLT VQA checkpoint."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    # Metadata read by the agent/tool framework.
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The processor needs PIL; fail early if vision deps are missing.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image", question: str):
        """Turns the (image, question) pair into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        # Inference only — no gradients needed.
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Maps the highest-scoring logit back to its answer string."""
        idx = outputs.argmax(-1).item()
        # NOTE(review): this codebase's config exposes `idalabel`; upstream names it `id2label`.
        return self.model.config.idalabel[idx]
156
"""Lazy import structure for the LayoutLMv2 model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> public names; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
156
1
"""Interactive `accelerate config` flow for Amazon SageMaker."""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_botoa_available():
    import botoa  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    """Creates an IAM role SageMaker can assume, with the permissions training jobs need."""
    iam_client = botoa.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    """Returns the ARN for an existing IAM role."""
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    """Interactively collects all settings and returns the resulting SageMakerConfig."""
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    # NOTE(review): this codebase's SageMakerConfig field is spelled `eca_instance_type`; confirm against config_args.
    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        eca_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
209
"""ROUGE metric: a wrapper around Google Research's `rouge_score` package."""
# NOTE(review): the original block was machine-mangled — the three module
# constants were all named `UpperCamelCase_` although the decorator below
# references `_DESCRIPTION` / `_KWARGS_DESCRIPTION`, both methods were named
# `__lowerCamelCase` although `datasets.Metric` dispatches to `_info` /
# `_compute`, and `_compute` declared every parameter with the same name
# (a SyntaxError).  Names are restored from those in-file references and
# from the `datasets.Metric` API.
import absl  # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import six  # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring

import datasets


_CITATION = """\
@inproceedings{lin-2004-rouge,
    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
    author = "Lin, Chin-Yew",
    booktitle = "Text Summarization Branches Out",
    month = jul,
    year = "2004",
    address = "Barcelona, Spain",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W04-1013",
    pages = "74--81",
}
"""

_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references
(human-produced) summary or translation.

Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.

This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""

_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    rouge_types: A list of rouge types to calculate.
        Valid names:
        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
        `"rougeL"`: Longest common subsequence based scoring.
        `"rougeLSum"`: rougeLsum splits text using `"\n"`.
        See details in https://github.com/huggingface/datasets/issues/617
    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
    use_aggregator: Return aggregates if this is set to True
Returns:
    rouge1: rouge_1 (precision, recall, f1),
    rouge2: rouge_2 (precision, recall, f1),
    rougeL: rouge_l (precision, recall, f1),
    rougeLsum: rouge_lsum (precision, recall, f1)
Examples:

    >>> rouge = datasets.load_metric('rouge')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> results = rouge.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
    >>> print(results["rouge1"])
    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
    >>> print(results["rouge1"].mid.fmeasure)
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """ROUGE metric; scoring is delegated to `rouge_score`."""

    def _info(self):
        # Metric metadata plus the feature types that `compute` accepts.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """Score each (reference, prediction) pair, optionally bootstrap-aggregated.

        Returns either a dict of `AggregateScore`s (when `use_aggregator`) or a
        dict mapping each rouge type to the per-example list of scores.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            # rouge_score expects (target, prediction) in that order.
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
209
1
# NOTE(review): the original block was machine-mangled — the tester class was
# declared `class a` although `setUp` instantiates `NystromformerModelTester(self)`,
# the two test classes were BOTH named `a` (the second shadowed the first, so its
# tests never ran), and every method declared all parameters with the same name
# (a SyntaxError).  Names restored from the in-file references and the standard
# transformers test layout.
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    """Builds tiny configs/inputs and checks output shapes for every Nystromformer head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random input ids/masks/labels sized from the tester's hyper-parameters."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        # The base model must accept all three input combinations.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Multiple-choice heads expect inputs replicated per choice on dim 1.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        # Position 2 is the [MASK] token in the encoded sentence.
        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
189
# NOTE(review): the original block was machine-mangled — the class was declared
# `class a` although the test code below constructs `RadixNode()`, every method
# was named `SCREAMING_SNAKE_CASE__` (later defs silently overrode earlier ones)
# although callers use `insert`/`insert_many`/`find`/`delete`/`print_tree`, and
# all three module functions were named `__lowerCAmelCase` although `main()` is
# called at the bottom.  Names are restored from those in-file references.
class RadixNode:
    """A node of a radix (compressed prefix) tree over strings."""

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of each child's prefix to the child.
        self.nodes: dict[str, RadixNode] = {}
        # A node is a leaf if the tree contains its word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Split `self.prefix` vs `word` at their common prefix.

        Returns (common prefix, remaining node prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of `words` into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert `word` below this node."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if `word` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove `word` from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the subtree rooted here, one `-` per tree level."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    """Self-test: insert, look up and delete a small word set."""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Demo: build a tree and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
189
1
"""Borůvka's algorithm for the minimum spanning tree of an undirected graph."""
# NOTE(review): the original block was machine-mangled — BOTH classes were
# named `UpperCAmelCase__` (the second shadowed the first) although the code
# references `Graph()` and `Graph.UnionFind()`, and every Graph method was
# named `A` (later defs overrode earlier ones).  Names restored from those
# in-file references; `UnionFind` is nested under `Graph` so that the
# `Graph.UnionFind()` call in `boruvka_mst` resolves.


class Graph:
    """Undirected weighted graph stored as a nested adjacency dict."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[u][v] == adjacency[v][u] == weight of edge (u, v)
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register `vertex` if it is not already in the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge (head, tail) with the given weight; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump equal weights so that all edge weights become distinct (keeps order)."""
        edges = self.get_edges()
        # get_edges lists every undirected edge twice; drop the mirror copies.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """One `head -> tail == weight` line per directed adjacency entry."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all (tail, head, weight) triples (each undirected edge appears twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a Graph from iterables of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            """Create a singleton set for `item` (idempotent)."""
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            """Return the representative of `item`'s set, compressing the path."""
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge the sets of item1 and item2; return the new root."""
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return a new Graph holding a minimum spanning tree of `graph`.

        Assumes distinct edge weights (call `distinct_weight` first if needed).
        """
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []

        while num_components > 1:
            # Cheapest outgoing edge per component root, or -1 if none seen yet.
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # Drop the mirror copy of each undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))

            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]

            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1

        mst = Graph.build(edges=mst_edges)
        return mst
473
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :float , _SCREAMING_SNAKE_CASE :float ) -> tuple: if inductance <= 0: raise ValueError("Inductance cannot be 0 or negative" ) elif capacitance <= 0: raise ValueError("Capacitance cannot be 0 or negative" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
473
1
# NOTE(review): the original block was machine-mangled — the base class was the
# undefined name `lowercase_` (the import line shows `BenchmarkArguments`),
# every dataclass field was named `_lowercase` (duplicates) although `__init__`
# reads `self.torchscript` / `self.torch_xla_tpu_print_metrics` /
# `self.fp16_opt_level`, and every property was named `__magic_name__`.
# Field/property names restored from those in-file references; the (mangled)
# public class name is kept so existing importers keep working.
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


a__ = logging.get_logger(__name__)


@dataclass
class _lowerCAmelCase(BenchmarkArguments):
    """PyTorch-specific benchmark arguments (device setup, torchscript, AMP level)."""

    # Legacy negated flags that are translated to their positive counterparts.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_*` kwargs into their positive form, then defer to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                # `no_foo=True` means `foo=False`.
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                a__.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve (device, n_gpu) once; order of preference: CPU flag, TPU, CUDA."""
        requires_backends(self, ["torch"])
        a__.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
99
import random from typing import Any def _UpperCAmelCase ( a : list ): for _ in range(len(a ) ): snake_case__ = random.randint(0 , len(a ) - 1 ) snake_case__ = random.randint(0 , len(a ) - 1 ) snake_case__ , snake_case__ = data[b], data[a] return data if __name__ == "__main__": a__ = [0, 1, 2, 3, 4, 5, 6, 7] a__ = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
99
1
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" UpperCAmelCase = StableDiffusionPipeline.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors UpperCAmelCase = load_file(_lowerCAmelCase ) UpperCAmelCase = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: UpperCAmelCase = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" ) UpperCAmelCase = pipeline.text_encoder else: UpperCAmelCase = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" ) UpperCAmelCase = pipeline.unet # find the target layer UpperCAmelCase = layer_infos.pop(0 ) while len(_lowerCAmelCase ) > -1: try: UpperCAmelCase = curr_layer.__getattr__(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCAmelCase = layer_infos.pop(0 ) elif len(_lowerCAmelCase ) == 0: break except Exception: if len(_lowerCAmelCase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: UpperCAmelCase = layer_infos.pop(0 ) UpperCAmelCase = [] if "lora_down" in key: pair_keys.append(key.replace("lora_down" , "lora_up" ) ) pair_keys.append(_lowerCAmelCase ) else: pair_keys.append(_lowerCAmelCase ) pair_keys.append(key.replace("lora_up" , "lora_down" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: UpperCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) UpperCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_lowerCAmelCase , _lowerCAmelCase ).unsqueeze(2 
).unsqueeze(3 ) else: UpperCAmelCase = state_dict[pair_keys[0]].to(torch.floataa ) UpperCAmelCase = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_lowerCAmelCase , _lowerCAmelCase ) # update visited list for item in pair_keys: visited.append(_lowerCAmelCase ) return pipeline if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() parser.add_argument( "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format." ) parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors" ) parser.add_argument( "--lora_prefix_text_encoder", default="lora_te", type=str, help="The prefix of text encoder weight in safetensors", ) parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW") parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not." ) parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") __lowerCAmelCase =parser.parse_args() __lowerCAmelCase =args.base_model_path __lowerCAmelCase =args.checkpoint_path __lowerCAmelCase =args.dump_path __lowerCAmelCase =args.lora_prefix_unet __lowerCAmelCase =args.lora_prefix_text_encoder __lowerCAmelCase =args.alpha __lowerCAmelCase =convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) __lowerCAmelCase =pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
333
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __magic_name__ ( _a): _UpperCAmelCase : int = 'beit' def __init__( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : int=8_1_9_2 ,__SCREAMING_SNAKE_CASE : List[Any]=7_6_8 ,__SCREAMING_SNAKE_CASE : Any=1_2 ,__SCREAMING_SNAKE_CASE : List[str]=1_2 ,__SCREAMING_SNAKE_CASE : Optional[Any]=3_0_7_2 ,__SCREAMING_SNAKE_CASE : Dict="gelu" ,__SCREAMING_SNAKE_CASE : Tuple=0.0 ,__SCREAMING_SNAKE_CASE : int=0.0 ,__SCREAMING_SNAKE_CASE : int=0.02 ,__SCREAMING_SNAKE_CASE : Optional[int]=1e-12 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=2_2_4 ,__SCREAMING_SNAKE_CASE : List[str]=1_6 ,__SCREAMING_SNAKE_CASE : Any=3 ,__SCREAMING_SNAKE_CASE : Optional[Any]=False ,__SCREAMING_SNAKE_CASE : int=False ,__SCREAMING_SNAKE_CASE : List[str]=False ,__SCREAMING_SNAKE_CASE : List[str]=False ,__SCREAMING_SNAKE_CASE : Optional[int]=0.1 ,__SCREAMING_SNAKE_CASE : Tuple=0.1 ,__SCREAMING_SNAKE_CASE : Optional[int]=True ,__SCREAMING_SNAKE_CASE : str=[3, 5, 7, 1_1] ,__SCREAMING_SNAKE_CASE : int=[1, 2, 3, 6] ,__SCREAMING_SNAKE_CASE : Dict=True ,__SCREAMING_SNAKE_CASE : Any=0.4 ,__SCREAMING_SNAKE_CASE : List[Any]=2_5_6 ,__SCREAMING_SNAKE_CASE : List[Any]=1 ,__SCREAMING_SNAKE_CASE : Tuple=False ,__SCREAMING_SNAKE_CASE : Any=2_5_5 ,**__SCREAMING_SNAKE_CASE : List[str] ,): super().__init__(**__SCREAMING_SNAKE_CASE ) UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob 
UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = use_mask_token UpperCAmelCase = use_absolute_position_embeddings UpperCAmelCase = use_relative_position_bias UpperCAmelCase = use_shared_relative_position_bias UpperCAmelCase = layer_scale_init_value UpperCAmelCase = drop_path_rate UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) UpperCAmelCase = out_indices UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) UpperCAmelCase = use_auxiliary_head UpperCAmelCase = auxiliary_loss_weight UpperCAmelCase = auxiliary_channels UpperCAmelCase = auxiliary_num_convs UpperCAmelCase = auxiliary_concat_input UpperCAmelCase = semantic_loss_ignore_index class __magic_name__ ( _a): _UpperCAmelCase : List[str] = version.parse('1.11') @property def _UpperCAmelCase ( self : Any ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _UpperCAmelCase ( self : int ): return 1e-4
333
1
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class __a ( _lowerCAmelCase ): UpperCamelCase_ : List[str] = ['''image_processor''', '''tokenizer'''] UpperCamelCase_ : List[Any] = '''BlipImageProcessor''' UpperCamelCase_ : int = '''AutoTokenizer''' def __init__( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str )-> Optional[Any]: """simple docstring""" super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) # add QFormer tokenizer UpperCamelCase = qformer_tokenizer def __call__( self : Optional[Any] , UpperCAmelCase_ : ImageInput = None , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : int , )-> BatchFeature: """simple docstring""" if images is None and text is None: raise ValueError("You have to specify at least images or text." 
) UpperCamelCase = BatchFeature() if text is not None: UpperCamelCase = self.tokenizer( text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) encoding.update(UpperCAmelCase_ ) UpperCamelCase = self.qformer_tokenizer( text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) UpperCamelCase = qformer_text_encoding.pop("input_ids" ) UpperCamelCase = qformer_text_encoding.pop("attention_mask" ) if images is not None: UpperCamelCase = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def _SCREAMING_SNAKE_CASE ( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict )-> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] )-> Union[str, Any]: """simple docstring""" return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property # Copied from 
transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _SCREAMING_SNAKE_CASE ( self : Tuple )-> List[str]: """simple docstring""" UpperCamelCase = self.tokenizer.model_input_names UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict )-> int: """simple docstring""" if os.path.isfile(UpperCAmelCase_ ): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) UpperCamelCase = os.path.join(UpperCAmelCase_ , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ ) return super().save_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : int )-> Union[str, Any]: """simple docstring""" UpperCamelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ , subfolder="qformer_tokenizer" ) UpperCamelCase = cls._get_arguments_from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) args.append(UpperCAmelCase_ ) return cls(*UpperCAmelCase_ )
556
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node SCREAMING_SNAKE_CASE = 4 SCREAMING_SNAKE_CASE = 3 class __a ( _lowerCAmelCase ): pass def lowerCamelCase__ ( UpperCAmelCase_ )-> List[str]: """simple docstring""" for shard in shards: for i in range(UpperCAmelCase_ ): yield {"i": i, "shard": shard} def lowerCamelCase__ ( )-> Union[str, Any]: """simple docstring""" UpperCamelCase = int(os.environ["RANK"] ) UpperCamelCase = int(os.environ["WORLD_SIZE"] ) UpperCamelCase = ArgumentParser() parser.add_argument("--streaming" , type=UpperCAmelCase_ ) parser.add_argument("--local_rank" , type=UpperCAmelCase_ ) parser.add_argument("--num_workers" , type=UpperCAmelCase_ , default=0 ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.streaming UpperCamelCase = args.num_workers UpperCamelCase = {"shards": [F"shard_{shard_idx}" for shard_idx in range(UpperCAmelCase_ )]} UpperCamelCase = IterableDataset.from_generator(UpperCAmelCase_ , gen_kwargs=UpperCAmelCase_ ) if not streaming: UpperCamelCase = Dataset.from_list(list(UpperCAmelCase_ ) ) UpperCamelCase = split_dataset_by_node(UpperCAmelCase_ , rank=UpperCAmelCase_ , world_size=UpperCAmelCase_ ) UpperCamelCase = torch.utils.data.DataLoader(UpperCAmelCase_ , num_workers=UpperCAmelCase_ ) UpperCamelCase = NUM_SHARDS * NUM_ITEMS_PER_SHARD UpperCamelCase = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) UpperCamelCase = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" ) if __name__ == "__main__": main()
556
1
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class __lowerCamelCase (unittest.TestCase ): _lowercase = MODEL_FOR_CAUSAL_LM_MAPPING _lowercase = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = pipeline(task='text-generation',model='sshleifer/tiny-ctrl',framework='pt' ) # Using `do_sample=False` to force deterministic output __UpperCamelCase = text_generator('This is a test',do_sample=_UpperCamelCase ) self.assertEqual( _UpperCamelCase,[ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ],) __UpperCamelCase = text_generator(['This is a test', 'This is a second test'] ) self.assertEqual( _UpperCamelCase,[ [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ], [ { 'generated_text': ( 'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy' ' oscope. oscope. 
FiliFili@@' ) } ], ],) __UpperCamelCase = text_generator('This is a test',do_sample=_UpperCamelCase,num_return_sequences=2,return_tensors=_UpperCamelCase ) self.assertEqual( _UpperCamelCase,[ {'generated_token_ids': ANY(_UpperCamelCase )}, {'generated_token_ids': ANY(_UpperCamelCase )}, ],) __UpperCamelCase = text_generator.model.config.eos_token_id __UpperCamelCase = "<pad>" __UpperCamelCase = text_generator( ['This is a test', 'This is a second test'],do_sample=_UpperCamelCase,num_return_sequences=2,batch_size=2,return_tensors=_UpperCamelCase,) self.assertEqual( _UpperCamelCase,[ [ {'generated_token_ids': ANY(_UpperCamelCase )}, {'generated_token_ids': ANY(_UpperCamelCase )}, ], [ {'generated_token_ids': ANY(_UpperCamelCase )}, {'generated_token_ids': ANY(_UpperCamelCase )}, ], ],) @require_tf def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = pipeline(task='text-generation',model='sshleifer/tiny-ctrl',framework='tf' ) # Using `do_sample=False` to force deterministic output __UpperCamelCase = text_generator('This is a test',do_sample=_UpperCamelCase ) self.assertEqual( _UpperCamelCase,[ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ],) __UpperCamelCase = text_generator(['This is a test', 'This is a second test'],do_sample=_UpperCamelCase ) self.assertEqual( _UpperCamelCase,[ [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ], [ { 'generated_text': ( 'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes' ' Cannes 閲閲Cannes Cannes Cannes 攵 please,' ) } ], ],) def snake_case_ ( self: Dict,A_: Dict,A_: List[Any],A_: Any ): '''simple docstring''' __UpperCamelCase = TextGenerationPipeline(model=_UpperCamelCase,tokenizer=_UpperCamelCase ) return text_generator, ["This is a test", "Another test"] def snake_case_ ( self: str ): '''simple docstring''' 
__UpperCamelCase = "Hello I believe in" __UpperCamelCase = pipeline('text-generation',model='hf-internal-testing/tiny-random-gpt2' ) __UpperCamelCase = text_generator(_UpperCamelCase ) self.assertEqual( _UpperCamelCase,[{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}],) __UpperCamelCase = text_generator(_UpperCamelCase,stop_sequence=' fe' ) self.assertEqual(_UpperCamelCase,[{'generated_text': 'Hello I believe in fe'}] ) def snake_case_ ( self: Optional[int],A_: List[Any],A_: Optional[int] ): '''simple docstring''' __UpperCamelCase = text_generator.model __UpperCamelCase = text_generator.tokenizer __UpperCamelCase = text_generator('This is a test' ) self.assertEqual(_UpperCamelCase,[{'generated_text': ANY(_UpperCamelCase )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) __UpperCamelCase = text_generator('This is a test',return_full_text=_UpperCamelCase ) self.assertEqual(_UpperCamelCase,[{'generated_text': ANY(_UpperCamelCase )}] ) self.assertNotIn('This is a test',outputs[0]['generated_text'] ) __UpperCamelCase = pipeline(task='text-generation',model=_UpperCamelCase,tokenizer=_UpperCamelCase,return_full_text=_UpperCamelCase ) __UpperCamelCase = text_generator('This is a test' ) self.assertEqual(_UpperCamelCase,[{'generated_text': ANY(_UpperCamelCase )}] ) self.assertNotIn('This is a test',outputs[0]['generated_text'] ) __UpperCamelCase = text_generator('This is a test',return_full_text=_UpperCamelCase ) self.assertEqual(_UpperCamelCase,[{'generated_text': ANY(_UpperCamelCase )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) __UpperCamelCase = text_generator(['This is great !', 'Something else'],num_return_sequences=2,do_sample=_UpperCamelCase ) self.assertEqual( _UpperCamelCase,[ [{'generated_text': ANY(_UpperCamelCase )}, {'generated_text': ANY(_UpperCamelCase )}], [{'generated_text': ANY(_UpperCamelCase )}, {'generated_text': ANY(_UpperCamelCase )}], ],) if 
text_generator.tokenizer.pad_token is not None: __UpperCamelCase = text_generator( ['This is great !', 'Something else'],num_return_sequences=2,batch_size=2,do_sample=_UpperCamelCase ) self.assertEqual( _UpperCamelCase,[ [{'generated_text': ANY(_UpperCamelCase )}, {'generated_text': ANY(_UpperCamelCase )}], [{'generated_text': ANY(_UpperCamelCase )}, {'generated_text': ANY(_UpperCamelCase )}], ],) with self.assertRaises(_UpperCamelCase ): __UpperCamelCase = text_generator('test',return_full_text=_UpperCamelCase,return_text=_UpperCamelCase ) with self.assertRaises(_UpperCamelCase ): __UpperCamelCase = text_generator('test',return_full_text=_UpperCamelCase,return_tensors=_UpperCamelCase ) with self.assertRaises(_UpperCamelCase ): __UpperCamelCase = text_generator('test',return_text=_UpperCamelCase,return_tensors=_UpperCamelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): __UpperCamelCase = text_generator('' ) self.assertEqual(_UpperCamelCase,[{'generated_text': ANY(_UpperCamelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): __UpperCamelCase = text_generator('' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
__UpperCamelCase = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('This is a test' * 500,max_new_tokens=20 ) __UpperCamelCase = text_generator('This is a test' * 500,handle_long_generation='hole',max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(_UpperCamelCase ): text_generator( 'This is a test' * 500,handle_long_generation='hole',max_new_tokens=tokenizer.model_max_length + 10,) @require_torch @require_accelerate @require_torch_gpu def snake_case_ ( self: List[Any] ): '''simple docstring''' import torch # Classic `model_kwargs` __UpperCamelCase = pipeline( model='hf-internal-testing/tiny-random-bloom',model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa},) self.assertEqual(pipe.model.device,torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype,torch.bfloataa ) __UpperCamelCase = pipe('This is a test' ) self.assertEqual( _UpperCamelCase,[ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ],) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
__UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom',device_map='auto',torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device,torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype,torch.bfloataa ) __UpperCamelCase = pipe('This is a test' ) self.assertEqual( _UpperCamelCase,[ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ],) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 __UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom',device_map='auto' ) self.assertEqual(pipe.model.device,torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype,torch.floataa ) __UpperCamelCase = pipe('This is a test' ) self.assertEqual( _UpperCamelCase,[ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ],) @require_torch @require_torch_gpu def snake_case_ ( self: Any ): '''simple docstring''' import torch __UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom',device=0,torch_dtype=torch.floataa ) pipe('This is a test' ) @require_torch @require_accelerate @require_torch_gpu def snake_case_ ( self: Optional[Any] ): '''simple docstring''' import torch __UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom',device_map='auto',torch_dtype=torch.floataa ) pipe('This is a test',do_sample=_UpperCamelCase,top_p=0.5 ) def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = "Hello world" __UpperCamelCase = pipeline('text-generation',model='hf-internal-testing/tiny-random-gpt2' ) if text_generator.model.framework == "tf": __UpperCamelCase = logging.get_logger('transformers.generation.tf_utils' ) else: __UpperCamelCase = logging.get_logger('transformers.generation.utils' ) __UpperCamelCase = "Both `max_new_tokens`" # The beggining of the 
message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(_UpperCamelCase ) as cl: __UpperCamelCase = text_generator(_UpperCamelCase,max_length=10,max_new_tokens=1 ) self.assertIn(_UpperCamelCase,cl.out ) # The user only sets one -> no warning with CaptureLogger(_UpperCamelCase ) as cl: __UpperCamelCase = text_generator(_UpperCamelCase,max_new_tokens=1 ) self.assertNotIn(_UpperCamelCase,cl.out ) with CaptureLogger(_UpperCamelCase ) as cl: __UpperCamelCase = text_generator(_UpperCamelCase,max_length=10 ) self.assertNotIn(_UpperCamelCase,cl.out )
1
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class a__ ( lowerCamelCase_ ): def __init__( self , _UpperCamelCase="" , _UpperCamelCase="train" ): """simple docstring""" assert os.path.isdir(_UpperCamelCase ) _lowercase : Optional[Any] = [] _lowercase : List[Any] = os.listdir(_UpperCamelCase ) for story_filename in story_filenames_list: if "summary" in story_filename: continue _lowercase : Union[str, Any] = os.path.join(_UpperCamelCase , _UpperCamelCase ) if not os.path.isfile(_UpperCamelCase ): continue self.documents.append(_UpperCamelCase ) def __len__( self ): """simple docstring""" return len(self.documents ) def __getitem__( self , _UpperCamelCase ): """simple docstring""" _lowercase : Dict = self.documents[idx] _lowercase : Optional[Any] = document_path.split("/" )[-1] with open(_UpperCamelCase , encoding="utf-8" ) as source: _lowercase : Dict = source.read() _lowercase , _lowercase : List[str] = process_story(_UpperCamelCase ) return document_name, story_lines, summary_lines def _A ( snake_case ) -> Optional[Any]: _lowercase : Optional[int] = list(filter(lambda snake_case : len(snake_case ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) ) # for some unknown reason some lines miss a period, add it _lowercase : Optional[Any] = [_add_missing_period(snake_case ) for line in nonempty_lines] # gather article lines _lowercase : Union[str, Any] = [] _lowercase : Union[str, Any] = deque(snake_case ) while True: try: _lowercase : Dict = lines.popleft() if element.startswith("@highlight" ): break story_lines.append(snake_case ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. 
return story_lines, [] # gather summary lines _lowercase : int = list(filter(lambda snake_case : not t.startswith("@highlight" ) , snake_case ) ) return story_lines, summary_lines def _A ( snake_case ) -> List[Any]: _lowercase : str = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"] if line.startswith("@highlight" ): return line if line[-1] in END_TOKENS: return line return line + "." def _A ( snake_case , snake_case , snake_case ) -> Any: if len(snake_case ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(snake_case )) ) return sequence def _A ( snake_case , snake_case ) -> Union[str, Any]: _lowercase : Union[str, Any] = torch.ones_like(snake_case ) _lowercase : Tuple = sequence == pad_token_id _lowercase : Any = 0 return mask def _A ( snake_case , snake_case , snake_case ) -> Tuple: _lowercase : List[str] = [tokenizer.encode(snake_case ) for line in story_lines] _lowercase : Optional[int] = [token for sentence in story_lines_token_ids for token in sentence] _lowercase : Optional[Any] = [tokenizer.encode(snake_case ) for line in summary_lines] _lowercase : Optional[Any] = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def _A ( snake_case , snake_case ) -> Any: _lowercase : Dict = [] for sequence in batch: _lowercase : Tuple = -1 _lowercase : str = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(snake_case ) return torch.tensor(snake_case )
245
0
"""Tests for the MobileViT image processor (resize, center-crop, channel-order flip)."""

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build image-processor kwargs and dummy inputs
    for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        # Default sizes mirror the MobileViT processor defaults used in the tests.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises MobileViTImageProcessor on PIL, NumPy and PyTorch inputs."""

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose every knob the tester configures.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        # Plain from_dict keeps the configured sizes ...
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # ... while kwargs override them (ints are expanded to dicts).
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
529
"""Directed and undirected weighted graphs stored as adjacency lists.

Each graph maps a node to a list of ``[weight, neighbor]`` pairs. The traversal
methods use iterative (stack/queue based) algorithms rather than recursion.
"""

from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    """A directed weighted graph: ``self.graph[u]`` lists outgoing edges of ``u``."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add edge u -> v with weight w (no duplicate edges); ensure v exists."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every node, in insertion order."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge u -> v if present."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from s (first node if -2); stop at d.

        Returns the list of visited nodes (including d when reached).
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random nodes (random c if -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Iterative breadth-first search from s (first node if -2)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges entering u."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving u."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """Return a topological ordering built from a DFS starting at s."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the set of nodes that participate in a cycle (as a list)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds spent in dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds spent in bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """An undirected weighted graph: each edge is stored in both directions."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge u <-> v with weight w (no duplicates)."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge u <-> v in both directions, if present."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from s (first node if -2); stop at d."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random nodes (random c if -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Iterative breadth-first search from s (first node if -2)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to u."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the set of nodes that participate in a cycle (as a list)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every node, in insertion order."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds spent in dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds spent in bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
529
1
"""Convert a fairseq wav2vec2 + MBart50 speech-translation checkpoint into a
Hugging Face SpeechEncoderDecoderModel checkpoint."""

import argparse

import fairseq
import torch
from torch import nn

from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name -> HF parameter name (a "*" is replaced by the layer index)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``key`` (dot-separated) into ``hf_pointer`` and copy ``value`` into
    the addressed parameter, checking shapes first."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Copy every fairseq encoder weight into ``hf_model``; warn about leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/layer-norm weight, keyed by its type id."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def load_adapter(full_name, value, adapter, unused_weights):
    """Copy one adapter / projection weight into the HF adapter module."""
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    """Build an untied nn.Linear sharing the embedding matrix of ``emb``."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Load the fairseq checkpoint, copy its weights into HF modules and save
    model + tokenizer + feature extractor under ``pytorch_dump_folder_path``."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=True,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
96
"""Tests for the ESMFold protein-folding model (EsmForProteinFolding)."""

import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    """Builds small ESMFold configs and dummy inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random input ids / masks / labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """A tiny folding-model config (2-block trunk, fp32 ESM)."""
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the model and check the positions/angles output shapes."""
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ESMFold; most generic tests are skipped because
    ESMFold's output format differs from standard transformer models."""

    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    """Slow integration test against the released facebook/esmfold_v1 weights."""

    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
96
1
from manim import * class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' def a_ ( self : int ) -> Optional[Any]: """simple docstring""" A__ = Rectangle(height=0.5 , width=0.5 ) A__ = Rectangle(height=0.2_5 , width=0.2_5 ) A__ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) A__ = [mem.copy() for i in range(6 )] A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""CPU""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCAmelCase ) A__ = [mem.copy() for i in range(4 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""GPU""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCAmelCase ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""Model""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCAmelCase ) A__ = [] A__ = [] A__ = [] for i, rect in enumerate(__lowerCAmelCase ): rect.set_stroke(__lowerCAmelCase ) A__ = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowerCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCAmelCase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , 
direction=__lowerCAmelCase , buff=0.0 ) self.add(__lowerCAmelCase ) model_cpu_arr.append(__lowerCAmelCase ) self.add(*__lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""Loaded Checkpoint""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__lowerCAmelCase ) A__ = [] A__ = [] for i, rect in enumerate(__lowerCAmelCase ): A__ = fill.copy().set_fill(__lowerCAmelCase , opacity=0.7 ) target.move_to(__lowerCAmelCase ) ckpt_arr.append(__lowerCAmelCase ) A__ = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__lowerCAmelCase ) self.add(*__lowerCAmelCase , *__lowerCAmelCase ) A__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A__ = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCAmelCase , __lowerCAmelCase ) A__ = MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__lowerCAmelCase ) A__ = MarkupText( f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) A__ = [meta_mem.copy() for i in range(6 )] A__ = [meta_mem.copy() for i in range(6 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""Disk""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) disk.move_to([-4.0, -1.2_5, 0] ) self.play(Write(__lowerCAmelCase , run_time=3 ) , Write(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) ) A__ = [] for i, rect in enumerate(__lowerCAmelCase ): A__ = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) ) self.play(*__lowerCAmelCase ) self.play(FadeOut(__lowerCAmelCase ) ) A__ = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase , run_time=3 ) ) self.play( FadeOut(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase ) , ) self.wait()
247
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A : '''simple docstring''' def __init__( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : Optional[int]=30 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : str=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=10 , __lowerCAmelCase : Tuple=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=2 , ) -> str: """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = 
attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 2 def a_ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def a_ ( self : str ) -> Union[str, Any]: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int ) -> str: """simple docstring""" A__ = DeiTModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ ( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ) -> Any: """simple docstring""" A__ = DeiTForMaskedImageModeling(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # 
test greyscale images A__ = 1 A__ = DeiTForMaskedImageModeling(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a_ ( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[str]: """simple docstring""" A__ = self.type_sequence_label_size A__ = DeiTForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ = 1 A__ = DeiTForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a_ ( self : Optional[int] ) -> List[Any]: """simple docstring""" A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : int = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __lowerCamelCase : Any = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __lowerCamelCase : Optional[Any] = False __lowerCamelCase : Optional[int] = False __lowerCamelCase : Union[str, Any] = False def a_ ( self : Any ) -> 
Any: """simple docstring""" A__ = DeiTModelTester(self ) A__ = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def a_ ( self : str ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def a_ ( self : int ) -> Any: """simple docstring""" pass def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def a_ ( self : Any ) -> List[str]: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__lowerCAmelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def a_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> str: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) def a_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any=False ) -> Dict: """simple docstring""" A__ = 
super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def a_ ( self : Optional[int] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCAmelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A__ = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() A__ = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) A__ = model(**__lowerCAmelCase ).loss loss.backward() def a_ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A__ = False A__ = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A__ = model_class(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.to(__lowerCAmelCase ) model.train() A__ = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) A__ = model(**__lowerCAmelCase ).loss loss.backward() def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, 
"""dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ): A__ = problem_type["""title"""] A__ = problem_type["""num_labels"""] A__ = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() A__ = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if problem_type["num_labels"] > 1: A__ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) A__ = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCAmelCase ) as warning_list: A__ = model(**__lowerCAmelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def a_ ( self : Any ) -> Dict: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = DeiTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def __lowerCamelCase ( ) -> Dict: """simple docstring""" A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A (unittest.TestCase ): '''simple docstring''' @cached_property def a_ ( self : Optional[int] ) -> Dict: """simple docstring""" return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def a_ ( self : Dict ) -> str: """simple docstring""" A__ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( __lowerCAmelCase ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): A__ = model(**__lowerCAmelCase ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) A__ = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def a_ ( self : Dict ) -> Dict: """simple docstring""" A__ = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" ) A__ = 
self.default_image_processor A__ = prepare_img() A__ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ) A__ = inputs.pixel_values.to(__lowerCAmelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A__ = model(__lowerCAmelCase )
247
1
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger __snake_case : Union[str, Any] = get_logger(__name__) __snake_case : Dict = R'''\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n''' class lowercase_ : @add_start_docstrings(_UpperCamelCase ) def __call__( self , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class lowercase_ : @add_start_docstrings(_UpperCamelCase ) def __call__( self , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" raise NotImplementedError( F"""{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.""" ) class lowercase_ ( __lowerCamelCase ): @add_start_docstrings(_UpperCamelCase ) def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" for processor in self: UpperCAmelCase_ = inspect.signature(processor.__call__ ).parameters if len(_UpperCamelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F"""Make sure that all the required parameters: {list(function_args.keys() )} for """ F"""{processor.__class__} are passed to the logits processor.""" ) UpperCAmelCase_ = processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) else: UpperCAmelCase_ = processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ ) -> List[str]: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not (temperature > 0): raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" ) UpperCAmelCase_ = temperature def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" UpperCAmelCase_ = scores / self.temperature return scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ = -float("Inf" ) , UpperCamelCase__ = 1 ) -> Optional[int]: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" ) if not isinstance(_UpperCamelCase , _UpperCamelCase ) or (min_tokens_to_keep < 1): raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" ) UpperCAmelCase_ = top_p UpperCAmelCase_ = filter_value UpperCAmelCase_ = min_tokens_to_keep def __call__( self , UpperCamelCase__ 
, UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = lax.top_k(_UpperCamelCase , scores.shape[-1] ) UpperCAmelCase_ = jnp.full_like(_UpperCamelCase , self.filter_value ) UpperCAmelCase_ = jax.nn.softmax(_UpperCamelCase , axis=-1 ).cumsum(axis=-1 ) UpperCAmelCase_ = cumulative_probs < self.top_p # include the token that is higher than top_p as well UpperCAmelCase_ = jnp.roll(_UpperCamelCase , 1 ) score_mask |= score_mask.at[:, 0].set(_UpperCamelCase ) # min tokens to keep UpperCAmelCase_ = score_mask.at[:, : self.min_tokens_to_keep].set(_UpperCamelCase ) UpperCAmelCase_ = jnp.where(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = jax.lax.sort_key_val(_UpperCamelCase , _UpperCamelCase )[-1] return next_scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ = -float("Inf" ) , UpperCamelCase__ = 1 ) -> List[Any]: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase ) or top_k <= 0: raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" ) UpperCAmelCase_ = max(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = filter_value def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = scores.shape UpperCAmelCase_ = jnp.full(batch_size * vocab_size , self.filter_value ) UpperCAmelCase_ = min(self.top_k , scores.shape[-1] ) # Safety check UpperCAmelCase_ , UpperCAmelCase_ = lax.top_k(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = jnp.broadcast_to((jnp.arange(_UpperCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() UpperCAmelCase_ = topk_scores.flatten() UpperCAmelCase_ = topk_indices.flatten() + shift UpperCAmelCase_ = next_scores_flat.at[topk_indices_flat].set(_UpperCamelCase ) UpperCAmelCase_ = next_scores_flat.reshape(_UpperCamelCase , _UpperCamelCase ) return next_scores 
class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ ) -> Tuple: """simple docstring""" UpperCAmelCase_ = bos_token_id def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" UpperCAmelCase_ = jnp.full(scores.shape , -float("inf" ) ) UpperCAmelCase_ = 1 - jnp.bool_(cur_len - 1 ) UpperCAmelCase_ = jnp.where(_UpperCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , _UpperCamelCase ) return scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: """simple docstring""" UpperCAmelCase_ = max_length UpperCAmelCase_ = eos_token_id def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" UpperCAmelCase_ = jnp.full(scores.shape , -float("inf" ) ) UpperCAmelCase_ = 1 - jnp.bool_(cur_len - self.max_length + 1 ) UpperCAmelCase_ = jnp.where(_UpperCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , _UpperCamelCase ) return scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase ) or min_length < 0: raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" ) if not isinstance(_UpperCamelCase , _UpperCamelCase ) or eos_token_id < 0: raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" ) UpperCAmelCase_ = min_length UpperCAmelCase_ = eos_token_id def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" UpperCAmelCase_ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) UpperCAmelCase_ = jnp.where(_UpperCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _UpperCamelCase ) return scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: 
"""simple docstring""" UpperCAmelCase_ = list(_UpperCamelCase ) UpperCAmelCase_ = begin_index def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = 1 - jnp.bool_(cur_len - self.begin_index ) UpperCAmelCase_ = jnp.where(_UpperCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _UpperCamelCase ) return scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ ) -> str: """simple docstring""" UpperCAmelCase_ = list(_UpperCamelCase ) def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" UpperCAmelCase_ = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ ) -> List[Any]: """simple docstring""" UpperCAmelCase_ = dict(_UpperCamelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
UpperCAmelCase_ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: UpperCAmelCase_ = force_token_array.at[index].set(_UpperCamelCase ) UpperCAmelCase_ = jnp.intaa(_UpperCamelCase ) def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> jnp.ndarray: """simple docstring""" def _force_token(UpperCamelCase__ ): UpperCAmelCase_ = scores.shape[0] UpperCAmelCase_ = self.force_token_array[generation_idx] UpperCAmelCase_ = jnp.ones_like(_UpperCamelCase , dtype=scores.dtype ) * -float("inf" ) UpperCAmelCase_ = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) UpperCAmelCase_ = lax.dynamic_update_slice(_UpperCamelCase , _UpperCamelCase , (0, current_token) ) return new_scores UpperCAmelCase_ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(_UpperCamelCase ) , lambda: scores , ) , ) return scores class lowercase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: """simple docstring""" UpperCAmelCase_ = generate_config.eos_token_id UpperCAmelCase_ = generate_config.no_timestamps_token_id UpperCAmelCase_ = generate_config.no_timestamps_token_id + 1 UpperCAmelCase_ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(_UpperCamelCase , "max_initial_timestamp_index" ): UpperCAmelCase_ = generate_config.max_initial_timestamp_index else: UpperCAmelCase_ = model_config.vocab_size if self.max_initial_timestamp_index is None: UpperCAmelCase_ = model_config.vocab_size def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: """simple docstring""" UpperCAmelCase_ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase_ = 
jnp.where((cur_len - self.begin_index) >= 1 , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _UpperCamelCase , ) UpperCAmelCase_ = jnp.where((cur_len - self.begin_index) < 2 , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , _UpperCamelCase , _UpperCamelCase , ) return jnp.where( _UpperCamelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _UpperCamelCase , ) UpperCAmelCase_ = jax.vmap(_UpperCamelCase )(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = jnp.where(cur_len == self.begin_index , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _UpperCamelCase , ) UpperCAmelCase_ = self.timestamp_begin + self.max_initial_timestamp_index UpperCAmelCase_ = jnp.where( _UpperCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _UpperCamelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp UpperCAmelCase_ = jax.nn.log_softmax(_UpperCamelCase , axis=-1 ) def handle_cumulative_probs(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase_ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) UpperCAmelCase_ = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _UpperCamelCase , ) UpperCAmelCase_ = jax.vmap(_UpperCamelCase )(_UpperCamelCase , _UpperCamelCase ) return scores
660
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    """Entry point for the ``diffusers-cli`` command-line tool.

    Builds the top-level argument parser, registers every sub-command,
    parses ``sys.argv`` and dispatches to the selected command's ``run()``.
    Prints usage and exits with status 1 when no sub-command is given.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No sub-command selected: argparse leaves `func` unset.
        parser.print_help()
        exit(1)

    # Instantiate and run the chosen command.
    service = args.func(args)
    service.run()


# Backward-compatible alias for the previous (generated) function name.
UpperCAmelCase_ = main


if __name__ == "__main__":
    main()
151
0
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def lowerCamelCase_ ( ) -> int: UpperCAmelCase_ : Dict = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__, stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' ) return image def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", 
F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') ) # fmt: on return rename_keys def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any: UpperCAmelCase_ : int = dct.pop(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Union[str, Any] = val def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases UpperCAmelCase_ : Optional[int] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) UpperCAmelCase_ : Any = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict 
UpperCAmelCase_ : Dict = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE__, requires_grad=SCREAMING_SNAKE_CASE__ ), v_bias) ) UpperCAmelCase_ : Union[str, Any] = qkv_bias def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str: UpperCAmelCase_ : Optional[int] = 364 if '''coco''' in model_name else 224 UpperCAmelCase_ : List[str] = BlipaVisionConfig(image_size=SCREAMING_SNAKE_CASE__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: UpperCAmelCase_ : Dict = OPTConfig.from_pretrained('''facebook/opt-2.7b''', eos_token_id=SCREAMING_SNAKE_CASE__ ).to_dict() elif "opt-6.7b" in model_name: UpperCAmelCase_ : int = OPTConfig.from_pretrained('''facebook/opt-6.7b''', eos_token_id=SCREAMING_SNAKE_CASE__ ).to_dict() elif "t5-xl" in model_name: UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: UpperCAmelCase_ : str = TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict() UpperCAmelCase_ : str = BlipaConfig(vision_config=SCREAMING_SNAKE_CASE__, text_config=SCREAMING_SNAKE_CASE__ ) return config, image_size @torch.no_grad() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : List[str]=None, SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = ( AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' ) ) UpperCAmelCase_ : Union[str, Any] = tokenizer('''\n''', add_special_tokens=SCREAMING_SNAKE_CASE__ ).input_ids[0] UpperCAmelCase_ : List[str] = get_blipa_config(SCREAMING_SNAKE_CASE__, eos_token_id=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[int] = 
BlipaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() UpperCAmelCase_ : Optional[int] = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } UpperCAmelCase_ : List[str] = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) UpperCAmelCase_ : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' UpperCAmelCase_ : List[Any] = load_model_and_preprocess( name=SCREAMING_SNAKE_CASE__, model_type=SCREAMING_SNAKE_CASE__, is_eval=SCREAMING_SNAKE_CASE__, device=SCREAMING_SNAKE_CASE__ ) original_model.eval() print('''Done!''' ) # update state dict keys UpperCAmelCase_ : Union[str, Any] = original_model.state_dict() UpperCAmelCase_ : Dict = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): UpperCAmelCase_ : int = state_dict.pop(SCREAMING_SNAKE_CASE__ ) if key.startswith('''Qformer.bert''' ): UpperCAmelCase_ : Optional[int] = key.replace('''Qformer.bert''', '''qformer''' ) if "attention.self" in key: UpperCAmelCase_ : Any = key.replace('''self''', '''attention''' ) if "opt_proj" in key: UpperCAmelCase_ : Tuple = key.replace('''opt_proj''', '''language_projection''' ) if "t5_proj" in key: UpperCAmelCase_ : int = key.replace('''t5_proj''', '''language_projection''' ) if key.startswith('''opt''' ): UpperCAmelCase_ : int = key.replace('''opt''', '''language''' ) if key.startswith('''t5''' ): 
UpperCAmelCase_ : Union[str, Any] = key.replace('''t5''', '''language''' ) UpperCAmelCase_ : str = val # read in qv biases read_in_q_v_bias(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = hf_model.load_state_dict(SCREAMING_SNAKE_CASE__, strict=SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] UpperCAmelCase_ : Optional[Any] = load_demo_image() UpperCAmelCase_ : Optional[int] = vis_processors['''eval'''](SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = tokenizer(['''\n'''], return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE__ ) # create processor UpperCAmelCase_ : Any = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size}, image_mean=SCREAMING_SNAKE_CASE__, image_std=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Dict = processor(images=SCREAMING_SNAKE_CASE__, return_tensors='''pt''' ).pixel_values.to(SCREAMING_SNAKE_CASE__ ) # make sure processor creates exact same pixel values assert torch.allclose(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) original_model.to(SCREAMING_SNAKE_CASE__ ) hf_model.to(SCREAMING_SNAKE_CASE__ ) with torch.no_grad(): if "opt" in model_name: UpperCAmelCase_ : List[Any] = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits UpperCAmelCase_ : Any = hf_model(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ).logits else: UpperCAmelCase_ : List[Any] = original_model( {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits UpperCAmelCase_ : Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100 ) UpperCAmelCase_ : List[str] = hf_model(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, labels=SCREAMING_SNAKE_CASE__ ).logits assert 
original_logits.shape == logits.shape print('''First values of original logits:''', original_logits[0, :3, :3] ) print('''First values of HF logits:''', logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": UpperCAmelCase_ : Any = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]], device=SCREAMING_SNAKE_CASE__ ) assert torch.allclose(logits[0, :3, :3], SCREAMING_SNAKE_CASE__, atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": UpperCAmelCase_ : Optional[Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]], device=SCREAMING_SNAKE_CASE__ ) else: # cast to same type UpperCAmelCase_ : List[str] = logits.dtype assert torch.allclose(original_logits.to(SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__, atol=1E-2 ) print('''Looks ok!''' ) print('''Generating a caption...''' ) UpperCAmelCase_ : Tuple = '''''' UpperCAmelCase_ : Any = tokenizer(SCREAMING_SNAKE_CASE__, return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} ) UpperCAmelCase_ : str = hf_model.generate( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, do_sample=SCREAMING_SNAKE_CASE__, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, ) print('''Original generation:''', SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = input_ids.shape[1] UpperCAmelCase_ : Tuple = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Union[str, Any] = [text.strip() for text in output_text] print('''HF generation:''', SCREAMING_SNAKE_CASE__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": 
lowerCamelCase : int = argparse.ArgumentParser() lowerCamelCase : int = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) lowerCamelCase : List[str] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
707
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __a (lowerCamelCase , unittest.TestCase ): __a : List[str] = BlenderbotSmallTokenizer __a : List[Any] = False def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" super().setUp() UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = '''adapt act apte''' UpperCAmelCase_ : Tuple = '''adapt act apte''' return input_text, output_text def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , 
**self.special_tokens_map ) UpperCAmelCase_ : List[Any] = '''adapt act apte''' UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te'''] UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] UpperCAmelCase_ : Optional[int] = '''I am a small frog.''' UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) UpperCAmelCase_ : List[Any] = '''I am a small frog .''' UpperCAmelCase_ : Any = '''.''' UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
644
0
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCAmelCase : Dict = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __lowerCAmelCase : int = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __lowerCAmelCase : List[str] = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __UpperCAmelCase = bs[:] __UpperCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = set() __UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase = char return pairs class A ( UpperCAmelCase ): a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ['''input_ids''', '''attention_mask'''] def __init__( self : str , __a : Union[str, Any] , __a : 
Optional[Any] , __a : List[Any]="replace" , __a : Union[str, Any]="<s>" , __a : Any="</s>" , __a : Dict="</s>" , __a : Dict="<s>" , __a : Tuple="<unk>" , __a : List[str]="<pad>" , __a : Any="<mask>" , __a : Dict=False , **__a : Union[str, Any] , ) -> Optional[int]: __UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token __UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token __UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token __UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token __UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token __UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token super().__init__( errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , ) with open(__a , encoding='''utf-8''' ) as vocab_handle: __UpperCAmelCase = json.load(__a ) __UpperCAmelCase = {v: k for k, v in self.encoder.items()} __UpperCAmelCase = errors # how to handle errors in decoding __UpperCAmelCase = bytes_to_unicode() __UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(__a , encoding='''utf-8''' ) as merges_handle: __UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1] __UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) ) __UpperCAmelCase = {} __UpperCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase = 
re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def snake_case__ ( self : List[Any] ) -> Union[str, Any]: return len(self.encoder ) def snake_case__ ( self : str ) -> int: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case__ ( self : List[Any] , __a : Tuple ) -> List[Any]: if token in self.cache: return self.cache[token] __UpperCAmelCase = tuple(__a ) __UpperCAmelCase = get_pairs(__a ) if not pairs: return token while True: __UpperCAmelCase = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase = bigram __UpperCAmelCase = [] __UpperCAmelCase = 0 while i < len(__a ): try: __UpperCAmelCase = word.index(__a , __a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase = j if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase = tuple(__a ) __UpperCAmelCase = new_word if len(__a ) == 1: break else: __UpperCAmelCase = get_pairs(__a ) __UpperCAmelCase = ''' '''.join(__a ) __UpperCAmelCase = word return word def snake_case__ ( self : int , __a : int ) -> List[Any]: __UpperCAmelCase = [] for token in re.findall(self.pat , __a ): __UpperCAmelCase = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(''' ''' ) ) return bpe_tokens def snake_case__ ( self : Optional[Any] , __a : Tuple ) -> str: return self.encoder.get(__a , self.encoder.get(self.unk_token ) ) def snake_case__ ( self : Optional[int] , __a : Any ) -> List[str]: 
return self.decoder.get(__a ) def snake_case__ ( self : Union[str, Any] , __a : List[str] ) -> List[Any]: __UpperCAmelCase = ''''''.join(__a ) __UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def snake_case__ ( self : Union[str, Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase = os.path.join( __a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCAmelCase = os.path.join( __a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + '''\n''' ) __UpperCAmelCase = 0 with open(__a , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __a : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) __UpperCAmelCase = token_index writer.write(''' '''.join(__a ) + '''\n''' ) index += 1 return vocab_file, merge_file def snake_case__ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) if token_ids_a is None: return [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1] def snake_case__ ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if 
token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case__ ( self : int , __a : Optional[int] , __a : int=False , **__a : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()): __UpperCAmelCase = ''' ''' + text return (text, kwargs) def snake_case__ ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ) -> Dict: return token_ids_a + [self.eos_token_id] def snake_case__ ( self : Optional[Any] , __a : "Conversation" ) -> List[int]: __UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(__a ) __UpperCAmelCase = ''' '''.join(__a ) __UpperCAmelCase = self.encode(__a ) if len(__a ) > self.model_max_length: __UpperCAmelCase = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
262
"""Configuration for the Time Series Transformer model."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class A(PretrainedConfig):  # fixed: base class was the undefined name `UpperCAmelCase`
    """Configuration class for a Time Series Transformer.

    Stores the hyper-parameters used to instantiate the model: the
    time-series-specific settings (prediction/context lengths, lags,
    scaling, static/dynamic feature counts) and the usual Transformer
    encoder/decoder architecture settings.
    """

    model_type = "time_series_transformer"
    # Map the generic HF attribute names onto this config's field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # NOTE: parameter names restored — the previous version declared every
        # parameter as `__a`, which is a SyntaxError (duplicate argument names).

        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            # No categorical features: keep a single dummy cardinality entry.
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # fixed: was named `snake_case__`, but __init__ reads
        # `self._number_of_features`, which would raise AttributeError.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
262
1
"""simple docstring""" from queue import PriorityQueue from typing import Any import numpy as np def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : set , SCREAMING_SNAKE_CASE : set , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : PriorityQueue , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : float | int , ): '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue __lowerCamelCase : Tuple =cst_fwd.get(SCREAMING_SNAKE_CASE , np.inf ) __lowerCamelCase : str =cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) __lowerCamelCase : Union[str, Any] =new_cost_f __lowerCamelCase : List[Any] =v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: __lowerCamelCase : Tuple =cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : dict ): '''simple docstring''' __lowerCamelCase : List[Any] =-1 __lowerCamelCase : Any =set() __lowerCamelCase : List[Any] =set() __lowerCamelCase : str ={source: 0} __lowerCamelCase : List[str] ={destination: 0} __lowerCamelCase : Optional[int] ={source: None} __lowerCamelCase : Optional[Any] ={destination: None} __lowerCamelCase : PriorityQueue[Any] =PriorityQueue() __lowerCamelCase : PriorityQueue[Any] =PriorityQueue() __lowerCamelCase : int =np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): __lowerCamelCase , __lowerCamelCase : Union[str, Any] =queue_forward.get() visited_forward.add(SCREAMING_SNAKE_CASE ) __lowerCamelCase , __lowerCamelCase : List[str] =queue_backward.get() visited_backward.add(SCREAMING_SNAKE_CASE ) __lowerCamelCase : Optional[int] =pass_and_relaxation( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) __lowerCamelCase : int =pass_and_relaxation( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: __lowerCamelCase : Union[str, Any] =shortest_distance return shortest_path_distance _UpperCamelCase = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } _UpperCamelCase = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
363
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ): """simple docstring""" __snake_case : Union[str, Any] = LDMTextToImagePipeline __snake_case : Optional[Any] = TEXT_TO_IMAGE_PARAMS - { """negative_prompt""", """negative_prompt_embeds""", """cross_attention_kwargs""", """prompt_embeds""", } __snake_case : str = PipelineTesterMixin.required_optional_params - { """num_images_per_prompt""", """callback""", """callback_steps""", } __snake_case : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS __snake_case : Optional[Any] = False def __lowercase ( self :List[str] ): torch.manual_seed(0 ) __lowerCamelCase : str =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __lowerCamelCase : str =DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , ) torch.manual_seed(0 ) __lowerCamelCase : Optional[int] =AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , ) torch.manual_seed(0 ) __lowerCamelCase : Any =CLIPTextConfig( bos_token_id=0 , 
eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __lowerCamelCase : Optional[int] =CLIPTextModel(__lowercase ) __lowerCamelCase : Dict =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowerCamelCase : Optional[int] ={ '''unet''': unet, '''scheduler''': scheduler, '''vqvae''': vae, '''bert''': text_encoder, '''tokenizer''': tokenizer, } return components def __lowercase ( self :int , __lowercase :Optional[int] , __lowercase :Optional[Any]=0 ): if str(__lowercase ).startswith('''mps''' ): __lowerCamelCase : Any =torch.manual_seed(__lowercase ) else: __lowerCamelCase : str =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCamelCase : Any ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __lowercase ( self :List[str] ): __lowerCamelCase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : str =self.get_dummy_components() __lowerCamelCase : Optional[int] =LDMTextToImagePipeline(**__lowercase ) pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCamelCase : str =self.get_dummy_inputs(__lowercase ) __lowerCamelCase : List[Any] =pipe(**__lowercase ).images __lowerCamelCase : Optional[int] =image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) __lowerCamelCase : Optional[Any] =np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self :Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self :int , __lowercase :Any , __lowercase 
:Optional[int]=torch.floataa , __lowercase :Dict=0 ): __lowerCamelCase : List[str] =torch.manual_seed(__lowercase ) __lowerCamelCase : List[str] =np.random.RandomState(__lowercase ).standard_normal((1, 4, 32, 32) ) __lowerCamelCase : List[str] =torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase ) __lowerCamelCase : Any ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __lowercase ( self :Tuple ): __lowerCamelCase : int =LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCamelCase : Tuple =self.get_inputs(__lowercase ) __lowerCamelCase : Optional[Any] =pipe(**__lowercase ).images __lowerCamelCase : Union[str, Any] =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) __lowerCamelCase : Union[str, Any] =np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] ) __lowerCamelCase : Dict =np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self :Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self :Dict , __lowercase :Optional[Any] , __lowercase :int=torch.floataa , __lowercase :Dict=0 ): __lowerCamelCase : Any =torch.manual_seed(__lowercase ) __lowerCamelCase : Dict =np.random.RandomState(__lowercase ).standard_normal((1, 4, 32, 32) ) __lowerCamelCase : str =torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase ) __lowerCamelCase : Dict ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 50, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', 
} return inputs def __lowercase ( self :Tuple ): __lowerCamelCase : Optional[int] =LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCamelCase : List[Any] =self.get_inputs(__lowercase ) __lowerCamelCase : Optional[int] =pipe(**__lowercase ).images[0] __lowerCamelCase : Optional[int] =load_numpy( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' ) __lowerCamelCase : Dict =np.abs(expected_image - image ).max() assert max_diff < 1e-3
363
1
def factorial(snake_case_: int) -> int:
    """Return ``snake_case_!`` computed iteratively.

    Previous revision defined all three functions under one shared name, so
    the call sites below raised NameError; real names are restored here.
    """
    fact = 1
    for i in range(1, snake_case_ + 1):
        fact *= i
    return fact


def split_and_add(snake_case_: int) -> int:
    """Return the sum of the decimal digits of ``snake_case_``."""
    sum_of_digits = 0
    number = snake_case_
    while number > 0:
        last_digit = number % 1_0
        sum_of_digits += last_digit
        number = number // 1_0  # drop the last digit
    return sum_of_digits


def solution(snake_case_: int = 1_0_0) -> int:
    """Project Euler 20: sum of the digits of ``snake_case_!`` (default 100!)."""
    nfact = factorial(snake_case_)
    result = split_and_add(nfact)
    return result


# Backward-compatible alias: the module previously exposed the entry point
# under this mangled name (the last definition won the binding).
SCREAMING_SNAKE_CASE__ = solution


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
416
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Checkpoint -> config-file URL map. Kept under the module's surviving
# public binding (the logger above previously shared — and lost — this name).
__lowerCamelCase : Optional[int] = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class a(UpperCamelCase_):
    """Configuration for LUKE models.

    Fixes over the previous revision: the constructor declared every
    parameter under one duplicated name (a SyntaxError) and stored each
    value into a throwaway local instead of on ``self``, so the config
    carried no state. Parameter names/defaults follow the upstream LUKE
    configuration.
    """

    # NOTE(review): presumably corresponds to PretrainedConfig.model_type —
    # confirm against the base class before renaming.
    __lowercase = """luke"""

    def __init__(
        self,
        vocab_size=5_02_67,
        entity_vocab_size=50_00_00,
        hidden_size=7_68,
        entity_emb_size=2_56,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        """Store all hyper-parameters; token ids are handled by the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
416
1
'''simple docstring'''
# Lazy-import scaffolding for the AltCLIP model package.
#
# Fixes over the previous revision: the structure dict was bound to a
# mangled name and then clobbered by the torch-only export list, the final
# _LazyModule call referenced an undefined `_import_structure`, and the
# lazy module was never installed into sys.modules.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it provides (resolved on first access).
_import_structure = {
    'configuration_altclip': [
        'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AltCLIPConfig',
        'AltCLIPTextConfig',
        'AltCLIPVisionConfig',
    ],
    'processing_altclip': ['AltCLIPProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only exported when torch is installed.
    _import_structure['modeling_altclip'] = [
        'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AltCLIPPreTrainedModel',
        'AltCLIPModel',
        'AltCLIPTextModel',
        'AltCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
464
'''simple docstring'''
# Test suite for the BioGpt model family (transformers).
# NOTE(review): an automated rewrite flattened every local to
# `SCREAMING_SNAKE_CASE` and most method names to `__lowercase`, so methods
# below reference names (`config`, `input_ids`, `model`, `result`,
# `outputs`, ...) that are never bound, some tuple-unpack targets carry
# annotations (a SyntaxError), and some signatures repeat a parameter name.
# The code is preserved byte-for-byte; only comments were added.
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


# Helper that builds a tiny BioGpt config/inputs and runs per-head checks.
class lowerCamelCase_:
    def __init__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : str=13 , lowerCAmelCase__ : List[Any]=7 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Dict=99 , lowerCAmelCase__ : List[str]=32 , lowerCAmelCase__ : str=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Any=37 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : str=5_12 , lowerCAmelCase__ : List[str]=16 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Tuple=4 , lowerCAmelCase__ : List[Any]=None , ):
        """simple docstring"""
        # Tiny-model hyper-parameters used by every check below.
        SCREAMING_SNAKE_CASE : str = parent
        SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
        SCREAMING_SNAKE_CASE : Any = seq_length
        SCREAMING_SNAKE_CASE : Optional[int] = is_training
        SCREAMING_SNAKE_CASE : Tuple = use_input_mask
        SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
        SCREAMING_SNAKE_CASE : Tuple = use_labels
        SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
        SCREAMING_SNAKE_CASE : int = hidden_size
        SCREAMING_SNAKE_CASE : int = num_hidden_layers
        SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
        SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
        SCREAMING_SNAKE_CASE : List[str] = hidden_act
        SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE : str = type_vocab_size
        SCREAMING_SNAKE_CASE : int = type_sequence_label_size
        SCREAMING_SNAKE_CASE : Tuple = initializer_range
        SCREAMING_SNAKE_CASE : Tuple = num_labels
        SCREAMING_SNAKE_CASE : List[Any] = num_choices
        SCREAMING_SNAKE_CASE : Any = scope

    # Builds random input ids, masks and labels plus a config.
    def __lowercase(self: int):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        SCREAMING_SNAKE_CASE : Dict = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length])
        SCREAMING_SNAKE_CASE : str = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        SCREAMING_SNAKE_CASE : Tuple = None
        SCREAMING_SNAKE_CASE : Union[str, Any] = None
        SCREAMING_SNAKE_CASE : List[str] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.type_sequence_label_size)
            SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size], self.num_choices)
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Tiny decoder config mirroring the tester's hyper-parameters.
    def __lowercase(self: List[Any]):
        """simple docstring"""
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=lowerCAmelCase__,
            initializer_range=self.initializer_range,
        )

    # Base-model forward pass: checks last_hidden_state shape.
    def __lowercase(self: List[Any], lowerCAmelCase__: Dict, lowerCAmelCase__: Union[str, Any], lowerCAmelCase__: int, lowerCAmelCase__: List[Any], lowerCAmelCase__: Tuple, lowerCAmelCase__: Tuple, lowerCAmelCase__: Optional[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Optional[Any] = BioGptModel(config=lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    # Causal-LM head: checks logits shape with labels supplied.
    def __lowercase(self: Union[str, Any], lowerCAmelCase__: Optional[Any], lowerCAmelCase__: Optional[Any], lowerCAmelCase__: Optional[Any], lowerCAmelCase__: Any, lowerCAmelCase__: str, lowerCAmelCase__: Tuple, lowerCAmelCase__: Union[str, Any], lowerCAmelCase__: Union[str, Any], lowerCAmelCase__: Dict, ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForCausalLM(config=lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, token_type_ids=lowerCAmelCase__, labels=lowerCAmelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    # Checks that cached (past_key_values) decoding matches a full forward
    # pass when the attention mask changes mid-sequence.
    def __lowercase(self: str, lowerCAmelCase__: List[Any], lowerCAmelCase__: Optional[Any], lowerCAmelCase__: Optional[int], lowerCAmelCase__: str, lowerCAmelCase__: str, *lowerCAmelCase__: Optional[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModel(config=lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        # create attention mask
        SCREAMING_SNAKE_CASE : List[str] = torch.ones(input_ids.shape, dtype=torch.long, device=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Optional[Any] = self.seq_length // 2
        SCREAMING_SNAKE_CASE : Any = 0
        # first forward pass
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((1,), lowerCAmelCase__).item() + 1
        SCREAMING_SNAKE_CASE : Tuple = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        SCREAMING_SNAKE_CASE : str = random_other_next_tokens
        # append to next input_ids and attn_mask
        SCREAMING_SNAKE_CASE : Tuple = torch.cat([input_ids, next_tokens], dim=-1)
        SCREAMING_SNAKE_CASE : str = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=lowerCAmelCase__)], dim=1, )
        # get two different outputs
        SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__)['''last_hidden_state''']
        SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__, past_key_values=lowerCAmelCase__, attention_mask=lowerCAmelCase__)['''last_hidden_state''']
        # select random slice
        SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((1,), output_from_past.shape[-1]).item()
        SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1e-3))

    # Checks cached decoding equals uncached decoding for multi-token steps.
    def __lowercase(self: Any, lowerCAmelCase__: str, lowerCAmelCase__: Tuple, lowerCAmelCase__: Optional[Any], lowerCAmelCase__: List[str], lowerCAmelCase__: List[Any], *lowerCAmelCase__: Any):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : str = BioGptModel(config=lowerCAmelCase__).to(lowerCAmelCase__).eval()
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(input_ids.shape, dtype=torch.long, device=lowerCAmelCase__)
        # first forward pass
        SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, use_cache=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3), config.vocab_size)
        SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([input_ids, next_tokens], dim=-1)
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([attention_mask, next_attn_mask], dim=-1)
        SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__)['''last_hidden_state''']
        SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, past_key_values=lowerCAmelCase__)[
            '''last_hidden_state'''
        ]
        # select random slice
        SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((1,), output_from_past.shape[-1]).item()
        SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1e-3))

    # Forward + backward pass, optionally under gradient checkpointing.
    def __lowercase(self: int, lowerCAmelCase__: Any, lowerCAmelCase__: Optional[int], lowerCAmelCase__: List[str], lowerCAmelCase__: Any, lowerCAmelCase__: str, *lowerCAmelCase__: Any, lowerCAmelCase__: int = False):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : List[Any] = BioGptForCausalLM(lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__, labels=lowerCAmelCase__)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    # Verifies c_proj weights follow the scaled-init scheme.
    def __lowercase(self: Any, lowerCAmelCase__: str, *lowerCAmelCase__: List[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModel(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    # Token-classification head: checks per-token logits shape.
    def __lowercase(self: str, lowerCAmelCase__: Any, lowerCAmelCase__: List[str], lowerCAmelCase__: Union[str, Any], lowerCAmelCase__: Optional[int], lowerCAmelCase__: Tuple, *lowerCAmelCase__: str):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : int = self.num_labels
        SCREAMING_SNAKE_CASE : Tuple = BioGptForTokenClassification(lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, token_type_ids=lowerCAmelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    # Adapter used by the common ModelTesterMixin machinery.
    def __lowercase(self: Optional[int]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,
        ) : Optional[Any] = config_and_inputs
        SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


# Main unit-test class wiring the tester into the shared mixins.
@require_torch
class lowerCamelCase_(snake_case_, snake_case_, snake_case_, unittest.TestCase):
    _lowerCAmelCase : Dict = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    _lowerCAmelCase : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
    _lowerCAmelCase : List[str] = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowerCAmelCase : Dict = False

    def __lowercase(self: List[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModelTester(self)
        SCREAMING_SNAKE_CASE : Any = ConfigTester(self, config_class=lowerCAmelCase__, hidden_size=37)

    def __lowercase(self: Dict):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def __lowercase(self: Optional[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__)

    # Re-runs the model check under each position-embedding variant.
    def __lowercase(self: Any):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE : Optional[Any] = type
            self.model_tester.create_and_check_model(*lowerCAmelCase__)

    def __lowercase(self: Optional[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase__)

    def __lowercase(self: List[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase__, gradient_checkpointing=lowerCAmelCase__)

    def __lowercase(self: List[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase__)

    def __lowercase(self: int):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase__)

    def __lowercase(self: str):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase__)

    # Batched generation with left padding must match unpadded generation.
    @slow
    def __lowercase(self: Tuple):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Dict = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        model.to(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        SCREAMING_SNAKE_CASE : Union[str, Any] = '''left'''
        # Define PAD Token = EOS Token = 50256
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token
        SCREAMING_SNAKE_CASE : Dict = model.config.eos_token_id
        # use different length sentences to test batching
        SCREAMING_SNAKE_CASE : Any = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCAmelCase__, return_tensors='''pt''', padding=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Dict = inputs['''input_ids'''].to(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
            input_ids=lowerCAmelCase__, attention_mask=inputs['''attention_mask'''].to(lowerCAmelCase__), )
        SCREAMING_SNAKE_CASE : int = tokenizer(sentences[0], return_tensors='''pt''').input_ids.to(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Any = model.generate(input_ids=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Tuple = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer(sentences[1], return_tensors='''pt''').input_ids.to(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(input_ids=lowerCAmelCase__, max_length=model.config.max_length - num_paddings)
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(output_padded[0], skip_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Dict = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__, [non_padded_sentence, padded_sentence])

    @slow
    def __lowercase(self: Tuple):
        """simple docstring"""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : List[Any] = BioGptModel.from_pretrained(lowerCAmelCase__)
            self.assertIsNotNone(lowerCAmelCase__)

    # Single-label sequence classification head.
    def __lowercase(self: List[str]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE : Union[str, Any] = 3
        SCREAMING_SNAKE_CASE : Dict = input_dict['''input_ids''']
        SCREAMING_SNAKE_CASE : str = input_ids.ne(1).to(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        SCREAMING_SNAKE_CASE : Dict = BioGptForSequenceClassification(lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, labels=lowerCAmelCase__)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    # Multi-label sequence classification head.
    def __lowercase(self: str):
        """simple docstring"""
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE : Tuple = 3
        SCREAMING_SNAKE_CASE : Optional[Any] = '''multi_label_classification'''
        SCREAMING_SNAKE_CASE : Any = input_dict['''input_ids''']
        SCREAMING_SNAKE_CASE : Any = input_ids.ne(1).to(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : List[str] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        SCREAMING_SNAKE_CASE : List[Any] = BioGptForSequenceClassification(lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, labels=lowerCAmelCase__)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


# Integration tests against the real microsoft/biogpt checkpoint.
@require_torch
class lowerCamelCase_(unittest.TestCase):
    # Pinned-logit regression check for a short input.
    @slow
    def __lowercase(self: List[Any]):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        SCREAMING_SNAKE_CASE : str = torch.tensor([[2, 48_05, 9, 6_56, 21]])
        SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__)[0]
        SCREAMING_SNAKE_CASE : Tuple = 4_23_84
        SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase__, atol=1e-4))

    # Pinned beam-search generation output.
    @slow
    def __lowercase(self: Tuple):
        """simple docstring"""
        SCREAMING_SNAKE_CASE : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        SCREAMING_SNAKE_CASE : Optional[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        model.to(lowerCAmelCase__)
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE : str = tokenizer('''COVID-19 is''', return_tensors='''pt''').to(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
            **lowerCAmelCase__, min_length=1_00, max_length=10_24, num_beams=5, early_stopping=lowerCAmelCase__, )
        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE : Any = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(lowerCAmelCase__, lowerCAmelCase__)
464
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a__ : Optional[Any] = { """configuration_efficientnet""": [ """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientNetConfig""", """EfficientNetOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = ["""EfficientNetImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[Any] = [ """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientNetForImageClassification""", """EfficientNetModel""", """EfficientNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, 
EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys a__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
589
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCAmelCase__ = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
351
0
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub standing in for PIL.Image when the vision extras are missing.

        It must be named ``Image`` so that ``Image.open(...)`` below resolves;
        the obfuscated original named it differently, breaking those calls.
        """

        @staticmethod
        def open(*args, **kwargs):
            # Tests that reach this path are skipped by @require_vision anyway.
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    """Pipeline tests for the visual-question-answering task."""

    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # A tiny random ViLT checkpoint keeps the test fast.
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        # Random weights: only check the output *shape*, not the values.
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        # Same call, dict-style input.
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        # Batched input: the same (image, question) pair twice.
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
433
import argparse
import struct
import unittest


class SHAaaa:
    """Pure-Python SHA-256 (FIPS 180-4).

    ``SHAaaa(data).hash`` is the hex digest of the ``bytes`` input *data*.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes.
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]

        # Round constants: first 32 bits of the fractional parts of the
        # cube roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad *data* to a multiple of 64 bytes: 0x80, zeros, 64-bit bit-length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over each 64-byte block and set ``self.hash``."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers; filled by the message schedule below
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(64):
                if index > 15:
                    # Extend the first 16 words into the remaining 48.
                    sa = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + sa + words[index - 7] + sb) % 0x100000000

                # Compression
                sa = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                # temp1 and temp2 must stay distinct: the obfuscated original
                # collapsed both into one name, corrupting the round update.
                tempa = (h + sa + ch + self.round_constants[index] + words[index]) % 0x100000000
                sa = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sa + maj) % 0x100000000

                a, b, c, d, e, f, g, h = (
                    (tempa + tempb) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + tempa) % 0x100000000,
                    e,
                    f,
                    g,
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit *value* by *rotations* bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHAaaaHashTest(unittest.TestCase):
    """Cross-check the pure-Python digest against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """CLI entry point: hash a string (``-s``) or a file's contents (``-f``)."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(args.input_string, "utf-8")

    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
433
1
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Render and write the README.md model card for one facebook/wmt19-* pair.

    The obfuscated original defined this under a different name (so the call
    below raised NameError), referenced ``src_lang``/``texts``/``scores``/``pair``
    that were never bound, and passed the directory as ``exist_ok``.
    """
    # Example sentences used in the "How to use" snippet.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }

    pair = f"{src_lang}-{tgt_lang}"

    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # model_name is "wmt19-<src>-<tgt>"; the leading "wmt19" part is unused.
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
61
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Word-level (MeCab / Sudachi / Juman++) BertJapaneseTokenizer tests."""

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        # Must live on self: later tests instantiate the tokenizer from it.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        # A pickle round-trip must preserve tokenization behavior.
        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        # Sudachi keeps whitespace tokens by default.
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Character-level BertJapaneseTokenizer tests."""

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
613
0
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') A_ : Tuple = logging.getLogger(__name__) @dataclass class _a : '''simple docstring''' UpperCAmelCase__: str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCAmelCase__: Optional[str] = field( default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCAmelCase__: Optional[str] = field( default=__magic_name__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCAmelCase__: Optional[str] = field( default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCAmelCase__: bool = field( default=__magic_name__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCAmelCase__: str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase__: bool = field( default=__magic_name__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' 
'''with private models).''' ) } , ) @dataclass class _a : '''simple docstring''' UpperCAmelCase__: Optional[str] = field(default=__magic_name__ , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCAmelCase__: Optional[str] = field( default=__magic_name__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCAmelCase__: bool = field( default=__magic_name__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCAmelCase__: Optional[int] = field( default=__magic_name__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCAmelCase__: Optional[int] = field( default=__magic_name__ , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCAmelCase__: bool = field( default=__magic_name__ , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) UpperCAmelCase__: Optional[int] = field( default=__magic_name__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase__: Optional[int] = field( default=__magic_name__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def __A ( self ): if self.train_file is not None: A__ : int = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
if self.validation_file is not None: A__ : Any = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _a : '''simple docstring''' UpperCAmelCase__: PreTrainedTokenizerBase UpperCAmelCase__: Union[bool, str, PaddingStrategy] = True UpperCAmelCase__: Optional[int] = None UpperCAmelCase__: Optional[int] = None def __call__( self , A__ ): A__ : Dict = """label""" if """label""" in features[0].keys() else """labels""" A__ : int = [feature.pop(A__ ) for feature in features] A__ : Tuple = len(A__ ) A__ : Any = len(features[0]["""input_ids"""] ) A__ : str = [ [{k: v[i] for k, v in feature.items()} for i in range(A__ )] for feature in features ] A__ : Dict = list(chain(*A__ ) ) A__ : Dict = self.tokenizer.pad( A__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) # Un-flatten A__ : List[Any] = {k: v.view(A__ , A__ , -1 ) for k, v in batch.items()} # Add back labels A__ : Any = torch.tensor(A__ , dtype=torch.intaa ) return batch def UpperCamelCase () -> Tuple: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A__ : Any = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_swag""" , lowercase_ , lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A__ : Any = training_args.get_process_log_level() logger.setLevel(lowercase_ ) datasets.utils.logging.set_verbosity(lowercase_ ) transformers.utils.logging.set_verbosity(lowercase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. A__ : int = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A__ : str = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: A__ : Union[str, Any] = {} if data_args.train_file is not None: A__ : List[str] = data_args.train_file if data_args.validation_file is not None: A__ : Optional[Any] = data_args.validation_file A__ : List[Any] = data_args.train_file.split(""".""" )[-1] A__ : Any = load_dataset( lowercase_ , data_files=lowercase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. A__ : int = load_dataset( """swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A__ : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A__ : int = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A__ : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. A__ : Optional[int] = [f"""ending{i}""" for i in range(4 )] A__ : List[str] = """sent1""" A__ : List[str] = """sent2""" if data_args.max_seq_length is None: A__ : Tuple = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( """The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value""" """ of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can""" """ override this default with `--block_size xxx`.""" ) A__ : Optional[Any] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) A__ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(lowercase_: Dict ): A__ : Union[str, Any] = [[context] * 4 for context in examples[context_name]] A__ : str = examples[question_header_name] A__ : List[str] = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase_ ) ] # Flatten out A__ : int = list(chain(*lowercase_ ) ) A__ : Any = list(chain(*lowercase_ ) ) # Tokenize A__ : Optional[Any] = tokenizer( lowercase_ , lowercase_ , truncation=lowercase_ , max_length=lowercase_ , padding="""max_length""" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(lowercase_ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) A__ : Dict = raw_datasets["""train"""] if data_args.max_train_samples is not None: A__ : int = min(len(lowercase_ ) , data_args.max_train_samples ) A__ : str = train_dataset.select(range(lowercase_ ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): A__ : str = train_dataset.map( lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) A__ : Dict = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: A__ : List[Any] = min(len(lowercase_ ) , data_args.max_eval_samples ) A__ : int = eval_dataset.select(range(lowercase_ ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): A__ : Dict = eval_dataset.map( lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator A__ : int = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=lowercase_ , 
pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowercase_: str ): A__ : Union[str, Any] = eval_predictions A__ : Dict = np.argmax(lowercase_ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer A__ : Union[str, Any] = Trainer( model=lowercase_ , args=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase_ , data_collator=lowercase_ , compute_metrics=lowercase_ , ) # Training if training_args.do_train: A__ : str = None if training_args.resume_from_checkpoint is not None: A__ : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: A__ : List[Any] = last_checkpoint A__ : Optional[int] = trainer.train(resume_from_checkpoint=lowercase_ ) trainer.save_model() # Saves the tokenizer too for easy upload A__ : Tuple = train_result.metrics A__ : str = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ ) ) A__ : int = min(lowercase_ , len(lowercase_ ) ) trainer.log_metrics("""train""" , lowercase_ ) trainer.save_metrics("""train""" , lowercase_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) A__ : Any = trainer.evaluate() A__ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase_ ) A__ : Union[str, Any] = min(lowercase_ , len(lowercase_ ) ) trainer.log_metrics("""eval""" , lowercase_ ) trainer.save_metrics("""eval""" , lowercase_ ) A__ : Union[str, Any] = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """multiple-choice""", """dataset_tags""": """swag""", """dataset_args""": """regular""", """dataset""": """SWAG""", """language""": """en""", } if training_args.push_to_hub: trainer.push_to_hub(**lowercase_ ) else: trainer.create_model_card(**lowercase_ ) def UpperCamelCase (lowercase_: 
Optional[Any] ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
717
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed A_ : Any = logging.getLogger(__name__) def UpperCamelCase (lowercase_: Optional[Any]=2 , lowercase_: Union[str, Any]=3 , lowercase_: int=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> int: def get_dataset(lowercase_: Optional[int] ): A__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) A__ : Dict = get_dataset(lowercase_ ) A__ : Any = get_dataset(lowercase_ ) A__ : Dict = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 ) A__ : Optional[Any] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 ) return (train_dataloader, valid_dataloader) def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[str] , lowercase_: int , lowercase_: int , lowercase_: List[str] , lowercase_: Dict=None ) -> List[Any]: A__ : List[Any] = [] for epoch in range(lowercase_ ): # Train quickly model.train() for batch in dataloader: A__ , A__ : Any = batch A__ : Any = model(lowercase_ ) A__ : Any = torch.nn.functional.mse_loss(lowercase_ , lowercase_ ) accelerator.backward(lowercase_ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class _a (nn.Module ): '''simple docstring''' def __init__( self ): super().__init__() A__ : str = nn.Parameter(torch.randn(1 ) ) A__ : Any = nn.Parameter(torch.randn(1 ) ) def __A ( self , A__ ): return x * self.a + self.b class _a (unittest.TestCase ): '''simple docstring''' def __A ( self ): with 
tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A__ : Optional[Any] = DummyModel() A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) A__ , A__ : str = dummy_dataloaders() A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ ) # Train baseline A__ : List[str] = Accelerator(project_config=A__ ) A__ , A__ , A__ , A__ : Any = accelerator.prepare( A__ , A__ , A__ , A__ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __A ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A__ : str = DummyModel() A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) A__ , A__ : int = dummy_dataloaders() # Train baseline A__ : str = Accelerator() A__ , A__ , A__ , A__ : List[str] = accelerator.prepare( A__ , A__ , A__ , A__ ) # Save initial A__ : List[Any] = os.path.join(A__ , """initial""" ) accelerator.save_state(A__ ) ((A__) , (A__)) : str = model.a.item(), model.b.item() A__ : Dict = optimizer.state_dict() A__ : List[str] = train(3 , A__ , A__ , A__ , A__ ) ((A__) , (A__)) : str = model.a.item(), model.b.item() A__ : Any = optimizer.state_dict() # Train partially set_seed(42 ) A__ : Optional[int] = DummyModel() A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) A__ , A__ : Dict = dummy_dataloaders() A__ : List[str] = Accelerator() A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare( A__ , A__ , A__ , A__ ) accelerator.load_state(A__ ) ((A__) , (A__)) : Tuple = model.a.item(), model.b.item() A__ : Union[str, Any] = optimizer.state_dict() self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) A__ : List[str] = train(2 , A__ , A__ , A__ , A__ ) # Save everything A__ : Optional[int] = os.path.join(A__ , """checkpoint""" ) accelerator.save_state(A__ ) # Load everything back in and make sure all states work 
accelerator.load_state(A__ ) test_rands += train(1 , A__ , A__ , A__ , A__ ) ((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item() A__ : Optional[int] = optimizer.state_dict() self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) def __A ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A__ : int = DummyModel() A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) A__ , A__ : List[str] = dummy_dataloaders() A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ ) # Train baseline A__ : Any = Accelerator(project_dir=A__ , project_config=A__ ) A__ , A__ , A__ , A__ : str = accelerator.prepare( A__ , A__ , A__ , A__ ) # Save initial accelerator.save_state() ((A__) , (A__)) : Tuple = model.a.item(), model.b.item() A__ : int = optimizer.state_dict() A__ : int = train(3 , A__ , A__ , A__ , A__ ) ((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item() A__ : Any = optimizer.state_dict() # Train partially set_seed(42 ) A__ : Dict = DummyModel() A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) A__ , A__ : Union[str, Any] = dummy_dataloaders() A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ ) A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ ) A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare( A__ , A__ , A__ , A__ ) accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item() A__ : Tuple = optimizer.state_dict() self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) A__ : str = train(2 , A__ , A__ , A__ , A__ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) ) test_rands += train(1 , A__ , A__ , A__ , A__ ) 
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item() A__ : List[Any] = optimizer.state_dict() self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) def __A ( self ): A__ : Union[str, Any] = torch.tensor([1, 2, 3] ) A__ : int = torch.tensor([2, 3, 4] ) A__ : List[Any] = DummyModel() A__ : List[Any] = torch.optim.Adam(net.parameters() ) A__ : Tuple = Accelerator() with self.assertRaises(A__ ) as ve: accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ ) A__ : Any = str(ve.exception ) self.assertTrue("""Item at index 0""" in message ) self.assertTrue("""Item at index 1""" in message ) self.assertFalse("""Item at index 2""" in message ) self.assertFalse("""Item at index 3""" in message ) def __A ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A__ : Any = DummyModel() A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 ) A__ , A__ : List[Any] = dummy_dataloaders() A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ ) # Train baseline A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ ) A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare( A__ , A__ , A__ , A__ , A__ ) # Save initial accelerator.save_state() A__ : Tuple = scheduler.state_dict() train(3 , A__ , A__ , A__ , A__ , A__ ) self.assertNotEqual(A__ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) self.assertEqual(A__ , scheduler.state_dict() ) def __A ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A__ : Optional[Any] = DummyModel() A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 ) # Train baseline A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ ) A__ : Union[str, Any] 
= accelerator.prepare(A__ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) ) self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) ) @require_cuda def __A ( self ): A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(A__ , env=os.environ.copy() ) if __name__ == "__main__": A_ : List[str] = '/tmp/accelerate/state_checkpointing' A_ : Optional[Any] = DummyModel() A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3) A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) A_ , A_ : List[Any] = dummy_dataloaders() A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) A_ , A_ : Dict = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: A_ : str = group['params'][0].device break assert param_device.type == accelerator.device.type A_ : Optional[Any] = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: A_ : str = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type 
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: A_ : Tuple = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
64
0
from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch("socket.socket" ) @patch("builtins.open" ) def lowercase_ ( __snake_case : Dict , __snake_case : Optional[int] ) -> str: '''simple docstring''' snake_case__ :Any = Mock() snake_case__ :Optional[int] = conn, Mock() snake_case__ :Dict = iter([1, None] ) snake_case__ :str = lambda __snake_case : next(__snake_case ) # ===== invoke ===== send_file(filename="mytext.txt" , testing=__snake_case ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
241
from __future__ import annotations def lowercase_ ( __snake_case : list[int] ) -> int: '''simple docstring''' if not nums: return 0 snake_case__ :Union[str, Any] = nums[0] snake_case__ :List[Any] = 0 for num in nums[1:]: snake_case__ , snake_case__ :Optional[Any] = ( max_excluding + num, max(__snake_case , __snake_case ), ) return max(__snake_case , __snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
241
1
"""simple docstring""" class a : def __init__( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = name _UpperCAmelCase = value _UpperCAmelCase = weight def __repr__( self : List[str] ): return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def lowerCAmelCase_ ( self : Tuple ): return self.value def lowerCAmelCase_ ( self : Optional[Any] ): return self.name def lowerCAmelCase_ ( self : Optional[Any] ): return self.weight def lowerCAmelCase_ ( self : List[str] ): return self.value / self.weight def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [] for i in range(len(lowercase ) ): menu.append(Things(name[i] ,value[i] ,weight[i] ) ) return menu def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = sorted(lowercase ,key=lowercase ,reverse=lowercase ) _UpperCAmelCase = [] _UpperCAmelCase , _UpperCAmelCase = 0.0, 0.0 for i in range(len(lowercase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __UpperCAmelCase ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
275
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = abs(lowercase ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = abs(lowercase ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def __UpperCAmelCase ( lowercase ): """simple docstring""" return sum(int(lowercase ) for c in str(abs(lowercase ) ) ) def __UpperCAmelCase ( ): """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowercase ,lowercase ) -> None: _UpperCAmelCase = f'''{func.__name__}({value})''' _UpperCAmelCase = timeit(f'''__main__.{call}''' ,setup="""import __main__""" ) print(f'''{call:56} = {func(lowercase )} -- {timing:.4f} seconds''' ) for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(lowercase ,lowercase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
275
1
from __future__ import annotations

from random import random


class Node:
    """A treap node: binary-search-tree ordered by value, heap ordered by a
    random priority drawn at construction time."""

    def __init__(self, value=None):
        self.value = value
        self.prior = random()  # random priority keeps the tree balanced in expectation
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        # Pre-order traversal: value, then left subtree, then right subtree.
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root, value):
    """Split the treap into two treaps: (values <= value, values > value)."""
    if root is None:  # an empty tree splits into two empty trees
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root (and its right subtree) belong to the "greater" half.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # root (and its left subtree) belong to the "less-or-equal" half.
            root.right, right = split(root.right, value)
            return root, right


def merge(left, right):
    """Merge two treaps where every value in `left` <= every value in `right`."""
    if (not left) or (not right):
        # One side is empty: the merge is just the other side.
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root, value):
    """Insert `value` into the treap and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root, value):
    """Remove all nodes equal to `value` and return the new root."""
    left, right = split(root, value - 1)  # left: values < value
    _, right = split(right, value)  # discard values == value
    return merge(left, right)


def inorder(root):
    """Print the treap's values in sorted (in-order) sequence."""
    if not root:
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)


def interact_treap(root, args):
    """Apply a whitespace-separated command string: '+N' inserts N, '-N' erases N."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive REPL over a treap; 'q' quits."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
206
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class __lowerCAmelCase : UpperCamelCase__ = PegasusConfig UpperCamelCase__ = {} UpperCamelCase__ = '''gelu''' def __init__( self :int , __magic_name__ :Optional[int] , __magic_name__ :str=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[int]=True , __magic_name__ :Optional[int]=False , __magic_name__ :List[Any]=99 , __magic_name__ :int=32 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=4 , __magic_name__ :Dict=37 , __magic_name__ :Tuple=0.1 , __magic_name__ :Optional[Any]=0.1 , __magic_name__ :Dict=40 , __magic_name__ :Tuple=2 , __magic_name__ :Optional[Any]=1 , __magic_name__ :Dict=0 , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = eos_token_id a = pad_token_id a = bos_token_id def lowerCamelCase__ ( self :str ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) a = tf.concat([input_ids, eos_tensor] , axis=1 ) a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , 
decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) a = prepare_pegasus_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ ) return config, inputs_dict def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Any , __magic_name__ :str ): '''simple docstring''' a = TFPegasusModel(config=__magic_name__ ).get_decoder() a = inputs_dict["""input_ids"""] a = input_ids[:1, :] a = inputs_dict["""attention_mask"""][:1, :] a = inputs_dict["""head_mask"""] a = 1 # first forward pass a = model(__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ , use_cache=__magic_name__ ) a , a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids a = ids_tensor((self.batch_size, 3) , config.vocab_size ) a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and a = tf.concat([input_ids, next_tokens] , axis=-1 ) a = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) a = model(__magic_name__ , attention_mask=__magic_name__ )[0] a = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice a = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) a = output_from_no_past[:, -3:, random_slice_idx] a = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-3 ) def __A ( __lowerCamelCase , __lowerCamelCase , 
__lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Tuple: if attention_mask is None: a = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: a = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: a = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase__ = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase__ = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = TFPegasusModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ ) def lowerCamelCase__ ( self :Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Dict 
): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__magic_name__ ) @require_sentencepiece @require_tokenizers @require_tf class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase__ = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first 
time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase__ = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase__ = '''google/pegasus-xsum''' @cached_property def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase__ ( self :int , **__magic_name__ :int ): '''simple docstring''' a = self.translate_src_text(**__magic_name__ ) assert self.expected_text == generated_words def lowerCamelCase__ ( self :Union[str, Any] , **__magic_name__ :int ): '''simple docstring''' a = self.tokenizer(self.src_text , **__magic_name__ , padding=__magic_name__ , return_tensors="""tf""" ) a = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__magic_name__ , ) a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__magic_name__ ) return generated_words @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
468
0
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text` using the Rabin-Karp algorithm.

    A rolling polynomial hash over code points (base `alphabet_size`, mod
    `modulus`) lets each window be compared in O(1); a direct string compare
    confirms candidate matches, so hash collisions cannot cause false positives.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and first substring of text.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # modulus_power ends up as alphabet_size**(p_len - 1) % modulus,
        # the weight of the leading character when rolling the hash.
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Roll the hash: drop text[i], shift, append text[i + p_len].
        # https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Exercise rabin_karp on matching and non-matching cases."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5) non-ASCII code points are handled by ord()
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
706
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name fragment -> HF module path ("*" is a layer index placeholder)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# HF keys that live on the model root rather than under the `wav2vec2.` prefix.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    """Read a label file: one label per non-empty line -> {line_number: first_word}."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` into the HF model at dotted path `key` (+ weight_type), with shape checks."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Record the converted tensor under its final HF key in `hf_dict` (headless export path)."""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


# Adapter-layer parameter renames (fairseq short names -> HF module attributes).
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    """Try to map one fairseq tensor into the HF model/dict; return whether it was used."""
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every fairseq tensor into `hf_model`, warning about any left unused."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-encoder conv/layer-norm tensor, validating shapes first."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak a fairseq wav2vec2 checkpoint into the transformers design
    and save it (plus tokenizer/feature-extractor files) to `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
612
0