Columns (name, dtype, observed min/max):

    code                     string   86 .. 54.5k characters
    code_codestyle           int64    0 .. 371
    style_context            string   87 .. 49.2k characters
    style_context_codestyle  int64    0 .. 349
    label                    int64    0 .. 1
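The rows below follow this schema: each pairs a `code` sample with a `style_context` sample, plus integer style ids and a binary `label`. As a minimal sketch of how such a dump can be loaded and inspected with the `datasets` library (the repository id `user/code-style-pairs` is a hypothetical placeholder):

    from datasets import load_dataset

    # Hypothetical repository id; substitute the real one for this dataset.
    ds = load_dataset("user/code-style-pairs", split="train")

    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:200])  # first 200 characters of the code sample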
"""simple docstring""" from math import ceil def _lowerCAmelCase ( lowercase_ = 1001 ): UpperCAmelCase = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): UpperCAmelCase = 2 * i + 1 UpperCAmelCase = 2 * i UpperCAmelCase = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: snake_case_ = int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number""")
code_codestyle: 78
"""simple docstring""" def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: while a != 0: a__ , a__: List[str] = b % a, a return b def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1: a__: Dict = F'mod inverse of {a!r} and {m!r} does not exist' raise ValueError(_SCREAMING_SNAKE_CASE ) a__ , a__ , a__: Union[str, Any] = 1, 0, a a__ , a__ , a__: Any = 0, 1, m while va != 0: a__: int = ua // va a__ , a__ , a__ , a__ , a__ , a__: Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
style_context_codestyle: 290
label: 0
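A quick sanity check of the extended-Euclidean inverse in the style_context above (a minimal sketch using the restored `find_mod_inverse` name):

    # 7 * 103 = 721 = 2 * 360 + 1, so 103 is the inverse of 7 modulo 360.
    assert find_mod_inverse(7, 360) == 103
    assert (7 * find_mod_inverse(7, 360)) % 360 == 1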
Row 2
code:

from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a
    # max_shard_size of 16 means that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)
    # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
code_codestyle: 158
style_context:

import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
style_context_codestyle: 158
label: 1
"""simple docstring""" import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , ): lowercase_ : Any = { '7z': (seven_zip_file, SevenZipExtractor), 'bz2': (bza_file, BzipaExtractor), 'gzip': (gz_file, GzipExtractor), 'lz4': (lza_file, LzaExtractor), 'tar': (tar_file, TarExtractor), 'xz': (xz_file, XzExtractor), 'zip': (zip_file, ZipExtractor), 'zstd': (zstd_file, ZstdExtractor), } lowercase_ , lowercase_ : List[Any] = input_paths_and_base_extractors[compression_format] if input_path is None: lowercase_ : str = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__SCREAMING_SNAKE_CASE ) assert base_extractor.is_extractable(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = tmp_path / ('extracted' if is_archive else 'extracted.txt') base_extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ : List[str] = file_path.read_text(encoding='utf-8' ) else: lowercase_ : List[Any] = output_path.read_text(encoding='utf-8' ) lowercase_ : List[Any] = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , ): lowercase_ : List[Any] = { '7z': seven_zip_file, 'bz2': bza_file, 'gzip': gz_file, 'lz4': lza_file, 'tar': tar_file, 'xz': xz_file, 'zip': zip_file, 'zstd': zstd_file, } lowercase_ : Optional[Any] = input_paths[compression_format] if input_path is None: lowercase_ : Optional[Any] = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = 
Extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE ) assert extractor_format is not None lowercase_ : Optional[Any] = tmp_path / ('extracted' if is_archive else 'extracted.txt') Extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ : str = file_path.read_text(encoding='utf-8' ) else: lowercase_ : Optional[Any] = output_path.read_text(encoding='utf-8' ) lowercase_ : List[str] = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.fixture def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ): import tarfile lowercase_ : int = tmp_path / 'data_dot_dot' directory.mkdir() lowercase_ : Union[str, Any] = directory / 'tar_file_with_dot_dot.tar' with tarfile.TarFile(__SCREAMING_SNAKE_CASE , 'w' ) as f: f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.join('..' , text_file.name ) ) return path @pytest.fixture def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): import tarfile lowercase_ : List[str] = tmp_path / 'data_sym_link' directory.mkdir() lowercase_ : List[Any] = directory / 'tar_file_with_sym_link.tar' os.symlink('..' , directory / 'subdir' , target_is_directory=__SCREAMING_SNAKE_CASE ) with tarfile.TarFile(__SCREAMING_SNAKE_CASE , 'w' ) as f: f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( 'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : str = { 'tar_file_with_dot_dot': tar_file_with_dot_dot, 'tar_file_with_sym_link': tar_file_with_sym_link, } lowercase_ : Tuple = insecure_tar_files[insecure_tar_file] lowercase_ : str = tmp_path / 'extracted' TarExtractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number lowercase_ : Tuple = tmpdir / 'not_a_zip_file' # From: https://github.com/python/cpython/pull/5053 lowercase_ : Dict = ( b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00' b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I' b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07' b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82' ) with not_a_zip_file.open('wb' ) as f: f.write(__SCREAMING_SNAKE_CASE ) assert zipfile.is_zipfile(str(__SCREAMING_SNAKE_CASE ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(__SCREAMING_SNAKE_CASE ) # but we're right
code_codestyle: 213
"""simple docstring""" from __future__ import annotations def lowercase__( __SCREAMING_SNAKE_CASE : list ): if not nums: raise ValueError('List is empty' ) return sum(__SCREAMING_SNAKE_CASE ) / len(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 213
label: 1
Row 4
code:

from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
code_codestyle: 197
style_context:

from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
style_context_codestyle: 197
label: 1
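A brief usage check for the anagram routine above (a sketch; these mirror typical doctest cases for such a function):

    assert check_anagrams("Silent", "Listen") is True
    assert check_anagrams("This is a string", "Is this a string") is True
    assert check_anagrams("There", "Their") is False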
Row 5
code:

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
code_codestyle: 2
style_context:

import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
style_context_codestyle: 2
label: 1
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_features'] def __init__( self ,__UpperCamelCase=80 ,__UpperCamelCase=1_6000 ,__UpperCamelCase=160 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=0.0 ,__UpperCamelCase=False ,**__UpperCamelCase ,) -> Tuple: '''simple docstring''' super().__init__( feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[Any] = n_fft lowercase_ : Any = hop_length lowercase_ : Optional[Any] = chunk_length lowercase_ : str = chunk_length * sampling_rate lowercase_ : str = self.n_samples // hop_length lowercase_ : Tuple = sampling_rate lowercase_ : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=__UpperCamelCase ,min_frequency=0.0 ,max_frequency=8000.0 ,sampling_rate=__UpperCamelCase ,norm='slaney' ,mel_scale='slaney' ,) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> np.ndarray: '''simple docstring''' lowercase_ : str = spectrogram( __UpperCamelCase ,window_function(self.n_fft ,'hann' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel='log10' ,) lowercase_ : Any = log_spec[:, :-1] lowercase_ : int = np.maximum(__UpperCamelCase ,log_spec.max() - 8.0 ) lowercase_ : Any = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _UpperCAmelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: lowercase_ : List[str] = np.array(__UpperCamelCase ,np.intaa ) lowercase_ : List[Any] = [] for vector, length in zip(__UpperCamelCase ,attention_mask.sum(-1 ) ): lowercase_ : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: lowercase_ : Dict = padding_value normed_input_values.append(__UpperCamelCase ) else: lowercase_ : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self ,__UpperCamelCase ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = "max_length" ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) lowercase_ : List[str] = isinstance(__UpperCamelCase ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowercase_ : Optional[int] = is_batched_numpy or ( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : Optional[int] = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Union[str, Any] = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase_ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Union[str, Any] = [np.asarray([raw_speech] ).T] lowercase_ : Tuple = BatchFeature({'input_features': raw_speech} ) # convert into correct format for padding lowercase_ : Any = self.pad( __UpperCamelCase ,padding=__UpperCamelCase ,max_length=max_length if max_length else self.n_samples ,truncation=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_attention_mask=return_attention_mask or do_normalize ,) # zero-mean and unit-variance normalization if do_normalize: lowercase_ : Any = self.zero_mean_unit_var_norm( padded_inputs['input_features'] ,attention_mask=padded_inputs['attention_mask'] ,padding_value=self.padding_value ,) lowercase_ : str = np.stack(padded_inputs['input_features'] ,axis=0 ) # make sure list is in array format lowercase_ : Union[str, Any] = padded_inputs.get('input_features' ).transpose(2 ,0 ,1 ) lowercase_ : Optional[int] = [self._np_extract_fbank_features(__UpperCamelCase ) for waveform in input_features[0]] if isinstance(input_features[0] ,__UpperCamelCase ): lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ) for feature in input_features] else: lowercase_ : Tuple = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase_ : Union[str, Any] = padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs def _UpperCAmelCase ( self ) -> Dict[str, Any]: '''simple docstring''' lowercase_ : List[str] = copy.deepcopy(self.__dict__ ) lowercase_ : Union[str, Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
code_codestyle: 321
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
style_context_codestyle: 321
label: 1
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="last" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : int = parent __lowerCAmelCase : List[Any] = batch_size __lowerCAmelCase : Dict = seq_length __lowerCAmelCase : int = is_training __lowerCAmelCase : Optional[int] = use_input_lengths __lowerCAmelCase : str = use_token_type_ids __lowerCAmelCase : int = use_labels __lowerCAmelCase : Union[str, Any] = gelu_activation __lowerCAmelCase : List[str] = sinusoidal_embeddings __lowerCAmelCase : Tuple = causal __lowerCAmelCase : Union[str, Any] = asm __lowerCAmelCase : str = n_langs __lowerCAmelCase : Dict = vocab_size __lowerCAmelCase : List[str] = n_special __lowerCAmelCase : Union[str, Any] = hidden_size __lowerCAmelCase : Optional[Any] = num_hidden_layers __lowerCAmelCase : Union[str, Any] = num_attention_heads __lowerCAmelCase : str = hidden_dropout_prob __lowerCAmelCase : Tuple = attention_probs_dropout_prob __lowerCAmelCase : str = max_position_embeddings __lowerCAmelCase : List[str] = type_vocab_size __lowerCAmelCase : str = type_sequence_label_size __lowerCAmelCase : int = initializer_range __lowerCAmelCase : List[Any] = num_labels __lowerCAmelCase : Optional[Any] = num_choices __lowerCAmelCase : Any = summary_type __lowerCAmelCase : str = use_proj __lowerCAmelCase : Optional[int] = scope def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : int = None if self.use_input_lengths: __lowerCAmelCase : Tuple = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowerCAmelCase : str = None if self.use_token_type_ids: __lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : int = None __lowerCAmelCase : int = None if self.use_labels: __lowerCAmelCase : Tuple = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , 2 ).float() __lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : Any = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __lowerCamelCase ( self ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = FlaubertModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , lengths=_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : str = FlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Tuple = FlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Optional[Any] = 
FlaubertForQuestionAnswering(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = model( _SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Dict = model( _SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , ) ((__lowerCAmelCase) , ) : Optional[Any] = result_with_labels.to_tuple() __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE ) ((__lowerCAmelCase) , ) : str = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : List[Any] = FlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : str = self.num_labels __lowerCAmelCase : List[str] = FlaubertForTokenClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Union[str, Any] = self.num_choices __lowerCAmelCase : Optional[int] = FlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() 
__lowerCAmelCase : str = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) : List[str] = config_and_inputs __lowerCAmelCase : Optional[int] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : List[Any] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) A_ : int = ( { 'feature-extraction': FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": __lowerCAmelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) return inputs_dict def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = FlaubertModelTester(self ) __lowerCAmelCase : List[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*_SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Any = FlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @slow @require_torch_gpu def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return __lowerCAmelCase : str = True __lowerCAmelCase : Any = model_class(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = torch.jit.trace( _SCREAMING_SNAKE_CASE , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , 'traced_model.pt' ) ) __lowerCAmelCase : Union[str, Any] = torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , 'traced_model.pt' ) , map_location=_SCREAMING_SNAKE_CASE ) loaded(inputs_dict['input_ids'].to(_SCREAMING_SNAKE_CASE ) , inputs_dict['attention_mask'].to(_SCREAMING_SNAKE_CASE ) ) @require_torch class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' ) __lowerCAmelCase : Optional[int] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) with torch.no_grad(): __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )[0] __lowerCAmelCase : List[Any] = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
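# A minimal standalone sketch of what the shape assertions above exercise:
# build a tiny FlaubertConfig, run a forward pass, and inspect
# last_hidden_state. The small sizes are illustrative assumptions, not the
# tester's defaults.
import torch
from transformers import FlaubertConfig, FlaubertModel

tiny_config = FlaubertConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
tiny_model = FlaubertModel(tiny_config).eval()
input_ids = torch.randint(0, 99, (2, 7))  # (batch_size, seq_length)
with torch.no_grad():
    output = tiny_model(input_ids)
print(output.last_hidden_state.shape)  # torch.Size([2, 7, 32])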
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class UpperCAmelCase_ ( A_, A_, unittest.TestCase ): lowercase__ = IFPipeline lowercase__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def __magic_name__ ( self : Dict ) -> List[str]: '''simple docstring''' return self._get_dummy_components() def __magic_name__ ( self : int , snake_case_ : Tuple , snake_case_ : Union[str, Any]=0 ) -> Optional[int]: '''simple docstring''' if str(snake_case_ ).startswith("mps" ): A__ = torch.manual_seed(snake_case_ ) else: A__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) A__ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __magic_name__ ( self : int ) -> str: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def __magic_name__ ( self : Optional[int] ) -> Dict: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __magic_name__ ( self : List[str] ) -> Dict: '''simple docstring''' self._test_save_load_local() def __magic_name__ ( self : List[str] ) -> int: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __magic_name__ ( self : Any ) -> Any: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): def __magic_name__ ( self : Optional[int] ) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Any ) -> Optional[int]: '''simple docstring''' A__ = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) A__ = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case_ , tokenizer=snake_case_ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) A__, A__ = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() A__ = None A__ = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case_ , 
snake_case_ , snake_case_ , snake_case_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img A__ = IFImgaImgPipeline(**pipe_a.components ) A__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting A__ = IFInpaintingPipeline(**pipe_a.components ) A__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def __magic_name__ ( self : Any , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : str ) -> Union[str, Any]: '''simple docstring''' _start_torch_memory_measurement() A__ = torch.Generator(device="cpu" ).manual_seed(0 ) A__ = pipe_a( prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , ) A__ = output.images[0] assert image.shape == (64, 64, 3) A__ = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 A__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case_ , snake_case_ ) # pipeline 2 _start_torch_memory_measurement() A__ = torch.Generator(device="cpu" ).manual_seed(0 ) A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ ) A__ = pipe_a( prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , ) A__ = output.images[0] assert image.shape == (256, 256, 3) A__ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case_ , snake_case_ ) def __magic_name__ ( self : Dict , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : int ) -> Optional[int]: '''simple docstring''' _start_torch_memory_measurement() A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ ) A__ = torch.Generator(device="cpu" ).manual_seed(0 ) A__ = pipe_a( prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , ) A__ = output.images[0] assert image.shape == (64, 64, 3) A__ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 A__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case_ , snake_case_ ) # pipeline 2 _start_torch_memory_measurement() A__ = torch.Generator(device="cpu" ).manual_seed(0 ) A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case_ ) A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ ) A__ = pipe_a( prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , original_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , ) A__ = 
output.images[0] assert image.shape == (256, 256, 3) A__ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case_ , snake_case_ ) def __magic_name__ ( self : Optional[Any] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _start_torch_memory_measurement() A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ ) A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(snake_case_ ) A__ = torch.Generator(device="cpu" ).manual_seed(0 ) A__ = pipe_a( prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , mask_image=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , ) A__ = output.images[0] assert image.shape == (64, 64, 3) A__ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 A__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case_ , snake_case_ ) # pipeline 2 _start_torch_memory_measurement() A__ = torch.Generator(device="cpu" ).manual_seed(0 ) A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ ) A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case_ ) A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(snake_case_ ) A__ = pipe_a( prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , mask_image=snake_case_ , original_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , ) A__ = output.images[0] assert image.shape == (256, 256, 3) A__ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case_ , snake_case_ ) def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of the [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation.

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded)  # {texts[tgt_lang]}
```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle inputs with repeated sub-phrases well: [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the one reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``)
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```

Note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)

### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```

## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
"""Validate Sri Lankan mobile phone numbers with a regular expression."""
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` is a valid Sri Lankan mobile number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # trunk prefix / country code: 0, 94, +94 or 0094
        r"7(0|1|2|4|5|6|7|8)"  # mobile network prefixes 70-72 and 74-78
        r"(-| |)"  # optional single separator: hyphen, space or nothing
        r"\d{7}$"  # seven-digit subscriber number
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))  # True
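# A few illustrative checks, hand-verified against the pattern above
# (the numbers themselves are made up for demonstration):
assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0718382399")
assert not is_sri_lankan_phone_number("0912343221")  # "91" is not an 07x mobile prefix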
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self : Tuple ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } SCREAMING_SNAKE_CASE : int = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(a ) , a ) def __UpperCamelCase ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) ) SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(a ) self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 , 5 ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(a ) self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) ) @require_tf def __UpperCamelCase ( self : Tuple ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : Tuple = tf.constant(a ) self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) ) SCREAMING_SNAKE_CASE : int = np.random.randn(3 , 4 , 5 ) SCREAMING_SNAKE_CASE : Dict = tf.constant(a ) self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) ) @require_flax def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : List[Any] = jnp.array(a ) self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) ) SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 , 5 ) SCREAMING_SNAKE_CASE : Any = jnp.array(a ) self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) ) def __UpperCamelCase ( self : List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE 
: List[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) ) SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) ) @require_torch def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor(a ) self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) ) SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 , 5 ) SCREAMING_SNAKE_CASE : Any = torch.tensor(a ) self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) ) @require_tf def __UpperCamelCase ( self : Any ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(a ) self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 , 5 ) SCREAMING_SNAKE_CASE : int = tf.constant(a ) self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) ) @require_flax def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(a ) self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) ) SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(a ) self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) ) def __UpperCamelCase ( self : int ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) ) SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 3 , 4 ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(a ) self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) ) SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 4 , 1 , 5 ) SCREAMING_SNAKE_CASE : Dict = torch.tensor(a ) self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) ) @require_tf def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 3 , 4 ) SCREAMING_SNAKE_CASE : str = tf.constant(a ) self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) ) SCREAMING_SNAKE_CASE : List[str] = np.random.randn(1 , 4 , 1 , 5 ) SCREAMING_SNAKE_CASE : Any = tf.constant(a ) self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) ) @require_flax def __UpperCamelCase ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 3 , 4 ) SCREAMING_SNAKE_CASE : List[str] = jnp.array(a ) self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) ) SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 4 , 1 , 5 ) SCREAMING_SNAKE_CASE : str = jnp.array(a ) self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) ) def __UpperCamelCase ( self : str ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = 
np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) ) @require_torch def __UpperCamelCase ( self : Optional[int] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a ) self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) ) @require_tf def __UpperCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : Dict = tf.constant(a ) self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) ) @require_flax def __UpperCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(a ) self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict[str, list[str]], start: str, goal: str) -> list[str]:
    """Return the nodes on a shortest path from `start` to `goal`, or [] if none exists."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict[str, list[str]], start: str, target: str) -> int:
    """Return the number of edges on a shortest path from `start` to `target`, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class snake_case ( unittest.TestCase): def a_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Dict ) -> Optional[int]: '''simple docstring''' _A = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) _A = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) sd_pipe.set_scheduler("sample_euler" ) _A = '''A painting of a squirrel eating a burger''' _A = torch.manual_seed(0 ) _A = sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" ) _A = output.images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a_ ( self : List[Any] ) -> List[str]: '''simple docstring''' _A = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) _A = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) sd_pipe.set_scheduler("sample_euler" ) _A = '''A painting of a squirrel eating a burger''' _A = torch.manual_seed(0 ) _A = sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" ) _A = output.images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def a_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _A = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) _A = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) sd_pipe.set_scheduler("sample_dpmpp_2m" ) _A = '''A painting of a squirrel eating a burger''' _A = torch.manual_seed(0 ) _A = sd_pipe( [prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=_lowerCamelCase , ) _A = output.images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class snake_case ( _UpperCamelCase): __UpperCamelCase = 'ctrl' __UpperCamelCase = ['past_key_values'] __UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Tuple , a__ : Union[str, Any]=24_65_34 , a__ : int=2_56 , a__ : Any=12_80 , a__ : Optional[int]=81_92 , a__ : Union[str, Any]=48 , a__ : Optional[int]=16 , a__ : List[str]=0.1 , a__ : List[str]=0.1 , a__ : Optional[int]=1E-6 , a__ : Optional[int]=0.0_2 , a__ : Tuple=True , **a__ : List[Any] , ) -> Tuple: '''simple docstring''' _A = vocab_size _A = n_positions _A = n_embd _A = n_layer _A = n_head _A = dff _A = resid_pdrop _A = embd_pdrop _A = layer_norm_epsilon _A = initializer_range _A = use_cache super().__init__(**a__ )
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from"
    " diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated."
    " Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): def _lowercase (self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowercase (self : str ): UpperCAmelCase_ = 1 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def _lowercase (self : int ): torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def _lowercase (self : Any ): torch.manual_seed(0 ) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _lowercase (self : Optional[Any] ): torch.manual_seed(0 ) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) return CLIPTextModel(__a ) def _lowercase (self : Any ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0] UpperCAmelCase_ = 
image[0, -3:, -3:, -1] UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowercase (self : Optional[int] ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _lowercase (self : str ): UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 UpperCAmelCase_ = unet.half() UpperCAmelCase_ = text_encoder.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : List[Any] ): UpperCAmelCase_ = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def _lowercase (self : Tuple ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _lowercase (self : List[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , ) UpperCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
'''simple docstring''' import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _a : int = logging.get_logger(__name__) class _UpperCAmelCase ( lowerCAmelCase_ ): a : List[Any] =["""input_values""", """attention_mask"""] def __init__( self,__SCREAMING_SNAKE_CASE = 1,__SCREAMING_SNAKE_CASE = 1_60_00,__SCREAMING_SNAKE_CASE = 0.0,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = 80,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = 64,__SCREAMING_SNAKE_CASE = "hann_window",__SCREAMING_SNAKE_CASE = 1.0,__SCREAMING_SNAKE_CASE = 80,__SCREAMING_SNAKE_CASE = 76_00,__SCREAMING_SNAKE_CASE = 1e-10,__SCREAMING_SNAKE_CASE = 2,__SCREAMING_SNAKE_CASE = True,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' super().__init__(feature_size=__SCREAMING_SNAKE_CASE,sampling_rate=__SCREAMING_SNAKE_CASE,padding_value=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = do_normalize __lowerCAmelCase = return_attention_mask __lowerCAmelCase = num_mel_bins __lowerCAmelCase = hop_length __lowerCAmelCase = win_length __lowerCAmelCase = win_function __lowerCAmelCase = frame_signal_scale __lowerCAmelCase = fmin __lowerCAmelCase = fmax __lowerCAmelCase = mel_floor __lowerCAmelCase = reduction_factor __lowerCAmelCase = win_length * sampling_rate // 10_00 __lowerCAmelCase = hop_length * sampling_rate // 10_00 __lowerCAmelCase = optimal_fft_length(self.sample_size ) __lowerCAmelCase = (self.n_fft // 2) + 1 __lowerCAmelCase = window_function(window_length=self.sample_size,name=self.win_function,periodic=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = mel_filter_bank( num_frequency_bins=self.n_freqs,num_mel_filters=self.num_mel_bins,min_frequency=self.fmin,max_frequency=self.fmax,sampling_rate=self.sampling_rate,norm="""slaney""",mel_scale="""slaney""",) if frame_signal_scale != 1.0: warnings.warn( """The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""",__SCREAMING_SNAKE_CASE,) if reduction_factor != 2.0: warnings.warn( """The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""",__SCREAMING_SNAKE_CASE,) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCamelCase__ ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 0.0 ): '''simple docstring''' if attention_mask is not None: __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE,np.intaa ) __lowerCAmelCase = [] for vector, length in zip(__SCREAMING_SNAKE_CASE,attention_mask.sum(-1 ) ): __lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: __lowerCAmelCase = padding_value normed_input_values.append(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = spectrogram( __SCREAMING_SNAKE_CASE,window=self.window,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,mel_filters=self.mel_filters,mel_floor=self.mel_floor,log_mel="""log10""",) return log_mel_spec.T def __call__( 
self,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' if audio is None and audio_target is None: raise ValueError("""You must provide either `audio` or `audio_target` values.""" ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if audio is not None: __lowerCAmelCase = self._process_audio( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,) else: __lowerCAmelCase = None if audio_target is not None: __lowerCAmelCase = self._process_audio( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,) if inputs is None: return inputs_target else: __lowerCAmelCase = inputs_target["""input_values"""] __lowerCAmelCase = inputs_target.get("""attention_mask""" ) if decoder_attention_mask is not None: __lowerCAmelCase = decoder_attention_mask return inputs def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = isinstance(__SCREAMING_SNAKE_CASE,np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __lowerCAmelCase = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE,(list, tuple) ) and (isinstance(speech[0],(np.ndarray, tuple, list) )) ) if is_batched: __lowerCAmelCase = [np.asarray(__SCREAMING_SNAKE_CASE,dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE,np.ndarray ): __lowerCAmelCase = np.asarray(__SCREAMING_SNAKE_CASE,dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE,np.ndarray ) and speech.dtype is np.dtype(np.floataa ): __lowerCAmelCase = speech.astype(np.floataa ) # always return batch if not is_batched: __lowerCAmelCase = [speech] # needed to make pad() work on spectrogram inputs __lowerCAmelCase = self.feature_size # convert into correct format for padding if is_target: __lowerCAmelCase = [self._extract_mel_features(__SCREAMING_SNAKE_CASE ) for waveform in speech] __lowerCAmelCase = BatchFeature({"""input_values""": features} ) __lowerCAmelCase = self.num_mel_bins else: __lowerCAmelCase = BatchFeature({"""input_values""": speech} ) __lowerCAmelCase = self.pad( 
__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,pad_to_multiple_of=__SCREAMING_SNAKE_CASE,return_attention_mask=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = feature_size_hack # convert input values to correct format __lowerCAmelCase = padded_inputs["""input_values"""] if not isinstance(input_values[0],np.ndarray ): __lowerCAmelCase = [np.asarray(__SCREAMING_SNAKE_CASE,dtype=np.floataa ) for array in input_values] elif ( not isinstance(__SCREAMING_SNAKE_CASE,np.ndarray ) and isinstance(input_values[0],np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): __lowerCAmelCase = [array.astype(np.floataa ) for array in input_values] elif isinstance(__SCREAMING_SNAKE_CASE,np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): __lowerCAmelCase = input_values.astype(np.floataa ) # convert attention_mask to correct format __lowerCAmelCase = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: __lowerCAmelCase = [np.asarray(__SCREAMING_SNAKE_CASE,dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: __lowerCAmelCase = ( attention_mask if self._get_padding_strategies(__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) __lowerCAmelCase = self.zero_mean_unit_var_norm( padded_inputs["""input_values"""],attention_mask=__SCREAMING_SNAKE_CASE,padding_value=self.padding_value ) if return_tensors is not None: __lowerCAmelCase = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE ) return padded_inputs def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = super().to_dict() # Don't serialize these as they are derived from the other properties. __lowerCAmelCase = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""] for name in names: if name in output: del output[name] return output
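# A hedged usage sketch, assuming the class above is transformers'
# SpeechT5FeatureExtractor (its name was mangled in this copy); one second of
# silence is a stand-in input.
import numpy as np
from transformers import SpeechT5FeatureExtractor

feature_extractor = SpeechT5FeatureExtractor()
speech = np.zeros(16000, dtype=np.float32)  # 1 s at the default 16 kHz sampling rate
inputs = feature_extractor(audio=speech, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape)  # expected: (1, 16000)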
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over contiguous subarrays of `arr` (Kadane's algorithm).

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart: a restart begins at `num`,
        # or at 0 (the empty subarray) when empty subarrays are allowed.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
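# One illustrative contrast (values made up): with an all-negative input, the
# flag decides whether the empty subarray (sum 0) may win.
print(max_subarray_sum([-3, -1, -2]))  # -1, the best non-empty subarray
print(max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True))  # 0, the empty subarray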
import math


def is_prime(number: int) -> bool:
    """Deterministic trial-division primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Project Euler problem 7: return the `nth` prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
233
1
"""simple docstring""" lowerCAmelCase__ = tuple[float, float, float] lowerCAmelCase__ = tuple[float, float, float] def a__ ( SCREAMING_SNAKE_CASE : Pointad , SCREAMING_SNAKE_CASE : Pointad ): '''simple docstring''' lowerCAmelCase : Any = end_pointa[0] - end_pointa[0] lowerCAmelCase : Any = end_pointa[1] - end_pointa[1] lowerCAmelCase : Optional[int] = end_pointa[2] - end_pointa[2] return (x, y, z) def a__ ( SCREAMING_SNAKE_CASE : Vectorad , SCREAMING_SNAKE_CASE : Vectorad ): '''simple docstring''' lowerCAmelCase : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i lowerCAmelCase : Any = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j lowerCAmelCase : Union[str, Any] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def a__ ( SCREAMING_SNAKE_CASE : Vectorad , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return tuple(round(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for x in vector ) == (0, 0, 0) def a__ ( SCREAMING_SNAKE_CASE : Pointad , SCREAMING_SNAKE_CASE : Pointad , SCREAMING_SNAKE_CASE : Pointad , SCREAMING_SNAKE_CASE : int = 1_0 ): '''simple docstring''' lowerCAmelCase : Optional[Any] = create_vector(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowerCAmelCase : str = create_vector(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return is_zero_vector(get_ad_vectors_cross(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
133
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ): """simple docstring""" a : Optional[Any] =KandinskyVaaImgaImgPipeline a : Optional[int] =["image_embeds", "negative_image_embeds", "image"] a : Optional[int] =[ "image_embeds", "negative_image_embeds", "image", ] a : str =[ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] a : Dict =False @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self ): """simple docstring""" return 100 @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase : List[str] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCAmelCase : int = UNetaDConditionModel(**snake_case__ ) return model @property def lowercase__ ( self ): """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Optional[Any] = self.dummy_unet lowerCAmelCase : Optional[int] = self.dummy_movq lowerCAmelCase : List[str] = { "num_train_timesteps": 1_000, "beta_schedule": "linear", "beta_start": 0.00085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCAmelCase : Tuple = DDIMScheduler(**snake_case__ ) lowerCAmelCase : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowercase__ ( self , snake_case__ , snake_case__=0 ): """simple docstring""" lowerCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , 
rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowerCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowerCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase : List[str] = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((256, 256) ) if str(snake_case__ ).startswith("mps" ): lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ ) else: lowerCAmelCase : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) lowerCAmelCase : List[str] = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Tuple = "cpu" lowerCAmelCase : Dict = self.get_dummy_components() lowerCAmelCase : Union[str, Any] = self.pipeline_class(**snake_case__ ) lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : int = pipe(**self.get_dummy_inputs(snake_case__ ) ) lowerCAmelCase : Union[str, Any] = output.images lowerCAmelCase : Union[str, Any] = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase : int = np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCAmelCase : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCAmelCase : Optional[Any] = "A red cartoon frog, 4k" lowerCAmelCase : int = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) lowerCAmelCase : List[Any] = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCAmelCase : Tuple = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : str = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCAmelCase , lowerCAmelCase : Optional[Any] = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCAmelCase : Tuple = pipeline( image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , 
generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCAmelCase : Optional[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
133
1
"""simple docstring""" import os import sys __lowerCAmelCase : Optional[Any] =os.path.join(os.path.dirname(__file__), """src""") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Tuple =[ """torch""", """numpy""", """tokenizers""", """filelock""", """requests""", """tqdm""", """regex""", """sentencepiece""", """sacremoses""", """importlib_metadata""", """huggingface_hub""", ] @add_start_docstrings(AutoConfig.__doc__ ) def UpperCAmelCase__ ( *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :str ) -> str: '''simple docstring''' return AutoConfig.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def UpperCAmelCase__ ( *lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :List[Any] ) -> Tuple: '''simple docstring''' return AutoTokenizer.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModel.__doc__ ) def UpperCAmelCase__ ( *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]: '''simple docstring''' return AutoModel.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def UpperCAmelCase__ ( *lowerCAmelCase__ :str , **lowerCAmelCase__ :Optional[Any] ) -> Any: '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def UpperCAmelCase__ ( *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :List[Any] ) -> Tuple: '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def UpperCAmelCase__ ( *lowerCAmelCase__ :str , **lowerCAmelCase__ :Dict ) -> Optional[Any]: '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def UpperCAmelCase__ ( *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Optional[int] ) -> Tuple: '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__ )
197
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class _A : def A__ ( self , __lowerCAmelCase ): """simple docstring""" raise NotImplementedError() def A__ ( self ): """simple docstring""" raise NotImplementedError() class _A ( lowerCAmelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase = False , **__lowerCAmelCase ): """simple docstring""" lowercase = tokenizer lowercase = skip_prompt lowercase = decode_kwargs # variables used in the streaming process lowercase = [] lowercase = 0 lowercase = True def A__ ( self , __lowerCAmelCase ): """simple docstring""" if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("""TextStreamer only supports batch size 1""" ) elif len(value.shape ) > 1: lowercase = value[0] if self.skip_prompt and self.next_tokens_are_prompt: lowercase = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) lowercase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("""\n""" ): lowercase = text[self.print_len :] lowercase = [] lowercase = 0 # If the last token is a CJK character, we print the characters. elif len(__lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): lowercase = text[self.print_len :] self.print_len += len(__lowerCAmelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: lowercase = text[self.print_len : text.rfind(""" """ ) + 1] self.print_len += len(__lowerCAmelCase ) self.on_finalized_text(__lowerCAmelCase ) def A__ ( self ): """simple docstring""" if len(self.token_cache ) > 0: lowercase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) lowercase = text[self.print_len :] lowercase = [] lowercase = 0 else: lowercase = """""" lowercase = True self.on_finalized_text(__lowerCAmelCase , stream_end=__lowerCAmelCase ) def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = False ): """simple docstring""" print(__lowerCAmelCase , flush=__lowerCAmelCase , end="""""" if not stream_end else None ) def A__ ( self , __lowerCAmelCase ): """simple docstring""" if ( (cp >= 0X4_e00 and cp <= 0X9_fff) or (cp >= 0X3_400 and cp <= 0X4_dbf) # or (cp >= 0X20_000 and cp <= 0X2a_6df) # or (cp >= 0X2a_700 and cp <= 0X2b_73f) # or (cp >= 0X2b_740 and cp <= 0X2b_81f) # or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # or (cp >= 0Xf_900 and cp <= 0Xf_aff) or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # ): # return True return False class _A ( lowerCAmelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , **__lowerCAmelCase ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) lowercase = Queue() lowercase = None lowercase = timeout def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = False ): """simple docstring""" self.text_queue.put(__lowerCAmelCase , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self ): """simple docstring""" return self def A__ ( self ): """simple docstring""" lowercase = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
197
1
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCamelCase__ : '''simple docstring''' def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=7 ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=99 ,__lowerCamelCase=32 ,__lowerCamelCase=2 ,__lowerCamelCase=4 ,__lowerCamelCase=37 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=5_12 ,__lowerCamelCase=16 ,__lowerCamelCase=2 ,__lowerCamelCase=0.02 ,__lowerCamelCase=3 ,__lowerCamelCase=4 ,__lowerCamelCase=None ,__lowerCamelCase=10_00 ,) -> str: """simple docstring""" lowerCAmelCase__ : List[str] = parent lowerCAmelCase__ : Union[str, Any] = batch_size lowerCAmelCase__ : Any = seq_length lowerCAmelCase__ : List[str] = is_training lowerCAmelCase__ : Union[str, Any] = use_input_mask lowerCAmelCase__ : Optional[Any] = use_token_type_ids lowerCAmelCase__ : Any = use_labels lowerCAmelCase__ : int = vocab_size lowerCAmelCase__ : Dict = hidden_size lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : Tuple = num_attention_heads lowerCAmelCase__ : Optional[int] = intermediate_size lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : Optional[int] = hidden_dropout_prob lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase__ : Any = max_position_embeddings lowerCAmelCase__ : Union[str, Any] = type_vocab_size lowerCAmelCase__ : List[str] = type_sequence_label_size lowerCAmelCase__ : Dict = initializer_range lowerCAmelCase__ : int = num_labels lowerCAmelCase__ : Optional[Any] = num_choices lowerCAmelCase__ : Any = scope lowerCAmelCase__ : Optional[int] = range_bbox def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase__ : Optional[int] = bbox[i, j, 3] lowerCAmelCase__ : str = bbox[i, j, 1] lowerCAmelCase__ : Any = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase__ : int = bbox[i, j, 2] lowerCAmelCase__ : str = bbox[i, j, 0] lowerCAmelCase__ : Optional[Any] = t lowerCAmelCase__ : Dict = tf.convert_to_tensor(__lowerCamelCase ) lowerCAmelCase__ : Tuple = None if self.use_input_mask: lowerCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : List[str] = None if self.use_token_type_ids: lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Union[str, Any] = None if 
self.use_labels: lowerCAmelCase__ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices ) lowerCAmelCase__ : Tuple = LayoutLMConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : List[str] = TFLayoutLMModel(config=__lowerCamelCase ) lowerCAmelCase__ : int = model(__lowerCamelCase ,__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = model(__lowerCamelCase ,__lowerCamelCase ,token_type_ids=__lowerCamelCase ) lowerCAmelCase__ : Dict = model(__lowerCamelCase ,__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Any: """simple docstring""" lowerCAmelCase__ : str = TFLayoutLMForMaskedLM(config=__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = model(__lowerCamelCase ,__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ,labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : str = self.num_labels lowerCAmelCase__ : Dict = TFLayoutLMForSequenceClassification(config=__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = model(__lowerCamelCase ,__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Dict = self.num_labels lowerCAmelCase__ : List[str] = TFLayoutLMForTokenClassification(config=__lowerCamelCase ) lowerCAmelCase__ : List[str] = model(__lowerCamelCase ,__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ,labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase 
,__lowerCamelCase ,__lowerCamelCase ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = model(__lowerCamelCase ,__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : List[str] = config_and_inputs lowerCAmelCase__ : List[str] = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase): '''simple docstring''' snake_case_ =( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case_ =( { """feature-extraction""": TFLayoutLMModel, """fill-mask""": TFLayoutLMForMaskedLM, """text-classification""": TFLayoutLMForSequenceClassification, """token-classification""": TFLayoutLMForTokenClassification, """zero-shot""": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case_ =False snake_case_ =True snake_case_ =10 def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : int = TFLayoutLMModelTester(self ) lowerCAmelCase__ : Union[str, Any] = ConfigTester(self ,config_class=__lowerCamelCase ,hidden_size=37 ) def lowerCAmelCase__ (self ) -> int: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase__ (self ) -> Tuple: """simple docstring""" lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase ) def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) @slow def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Optional[Any] = TFLayoutLMModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip('''Onnx compliancy broke with TF 2.10''' ) def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" pass def lowerCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase__ : str 
= tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231 lowerCAmelCase__ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231 lowerCAmelCase__ : List[Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231 lowerCAmelCase__ : Optional[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231 # these are sequence labels (i.e. at the token level) lowerCAmelCase__ : int = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]]) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : Optional[int] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase__ : List[Any] = model(input_ids=__lowerCamelCase ,bbox=__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ) # test the sequence output on [0, :3, :3] lowerCAmelCase__ : Optional[int] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] ,) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,__lowerCamelCase ,atol=1e-3 ) ) # test the pooled output on [1, :3] lowerCAmelCase__ : Union[str, Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] ,__lowerCamelCase ,atol=1e-3 ) ) @slow def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Optional[int] = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' ,num_labels=2 ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase__ : Union[str, Any] = model( input_ids=__lowerCamelCase ,bbox=__lowerCamelCase ,attention_mask=__lowerCamelCase 
,token_type_ids=__lowerCamelCase ,labels=tf.convert_to_tensor([1, 1] ) ,) # test whether we get a loss as a scalar lowerCAmelCase__ : Optional[Any] = outputs.loss lowerCAmelCase__ : Optional[int] = (2,) self.assertEqual(loss.shape ,__lowerCamelCase ) # test the shape of the logits lowerCAmelCase__ : List[Any] = outputs.logits lowerCAmelCase__ : Tuple = (2, 2) self.assertEqual(logits.shape ,__lowerCamelCase ) @slow def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Tuple = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' ,num_labels=13 ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase__ : List[Any] = model( input_ids=__lowerCamelCase ,bbox=__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ,labels=__lowerCamelCase ) # test the shape of the logits lowerCAmelCase__ : Dict = outputs.logits lowerCAmelCase__ : List[str] = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape ,__lowerCamelCase ) @slow def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : str = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase__ : Union[str, Any] = model(input_ids=__lowerCamelCase ,bbox=__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ) # test the shape of the logits lowerCAmelCase__ : int = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape ,__lowerCamelCase ) self.assertEqual(outputs.end_logits.shape ,__lowerCamelCase )
94
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase): '''simple docstring''' snake_case_ =KandinskyVaaPriorPipeline snake_case_ =["""prompt"""] snake_case_ =["""prompt""", """negative_prompt"""] snake_case_ =[ """num_images_per_prompt""", """generator""", """num_inference_steps""", """latents""", """negative_prompt""", """guidance_scale""", """output_type""", """return_dict""", ] snake_case_ =False @property def lowerCAmelCase__ (self ) -> Any: """simple docstring""" return 32 @property def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" return 32 @property def lowerCAmelCase__ (self ) -> Tuple: """simple docstring""" return self.time_input_dim @property def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" return self.time_input_dim * 4 @property def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" return 1_00 @property def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def lowerCAmelCase__ (self ) -> str: """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase__ : str = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) return CLIPTextModelWithProjection(__lowerCamelCase ) @property def lowerCAmelCase__ (self ) -> int: """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase__ : List[str] = { '''num_attention_heads''': 2, '''attention_head_dim''': 12, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } lowerCAmelCase__ : List[str] = PriorTransformer(**__lowerCamelCase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 lowerCAmelCase__ : int = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase__ : Any = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size ,image_size=2_24 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=14 ,) lowerCAmelCase__ : int = CLIPVisionModelWithProjection(__lowerCamelCase ) return model @property def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : Optional[Any] = CLIPImageProcessor( crop_size=2_24 ,do_center_crop=__lowerCamelCase ,do_normalize=__lowerCamelCase ,do_resize=__lowerCamelCase ,image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] ,image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,resample=3 ,size=2_24 ,) return image_processor def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : List[Any] = self.dummy_prior lowerCAmelCase__ : List[Any] = 
self.dummy_image_encoder lowerCAmelCase__ : Optional[Any] = self.dummy_text_encoder lowerCAmelCase__ : Optional[int] = self.dummy_tokenizer lowerCAmelCase__ : str = self.dummy_image_processor lowerCAmelCase__ : Union[str, Any] = UnCLIPScheduler( variance_type='''fixed_small_log''' ,prediction_type='''sample''' ,num_train_timesteps=10_00 ,clip_sample=__lowerCamelCase ,clip_sample_range=10.0 ,) lowerCAmelCase__ : int = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=0 ) -> int: """simple docstring""" if str(__lowerCamelCase ).startswith('''mps''' ): lowerCAmelCase__ : Dict = torch.manual_seed(__lowerCamelCase ) else: lowerCAmelCase__ : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" lowerCAmelCase__ : str = '''cpu''' lowerCAmelCase__ : Optional[int] = self.get_dummy_components() lowerCAmelCase__ : int = self.pipeline_class(**__lowerCamelCase ) lowerCAmelCase__ : int = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCAmelCase__ : List[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCAmelCase__ : Union[str, Any] = output.image_embeds lowerCAmelCase__ : Tuple = pipe( **self.get_dummy_inputs(__lowerCamelCase ) ,return_dict=__lowerCamelCase ,)[0] lowerCAmelCase__ : Union[str, Any] = image[0, -10:] lowerCAmelCase__ : str = image_from_tuple[0, -10:] assert image.shape == (1, 32) lowerCAmelCase__ : int = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : List[Any] = torch_device == '''cpu''' lowerCAmelCase__ : List[Any] = True lowerCAmelCase__ : Optional[int] = False self._test_inference_batch_single_identical( test_max_difference=__lowerCamelCase ,relax_max_difference=__lowerCamelCase ,test_mean_pixel_difference=__lowerCamelCase ,) @skip_mps def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : int = torch_device == '''cpu''' lowerCAmelCase__ : int = False self._test_attention_slicing_forward_pass( test_max_difference=__lowerCamelCase ,test_mean_pixel_difference=__lowerCamelCase ,)
94
1
"""simple docstring""" def _A ( lowercase ): """simple docstring""" if not head: return True # split the list to two parts a , a =head.next, head while fast and fast.next: a =fast.next.next a =slow.next a =slow.next a =None # Don't forget here! But forget still works! # reverse the second part a =None while second: a =second.next a =node a =second a =nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False a =node.next a =head.next return True def _A ( lowercase ): """simple docstring""" if not head or not head.next: return True # 1. Get the midpoint (slow) a =a =a =head while fast and fast.next: a , a =fast.next.next, slow.next # 2. Push the second half into the stack a =[slow.val] while slow.next: a =slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False a =cur.next return True def _A ( lowercase ): """simple docstring""" if not head or not head.next: return True a ={} a =0 while head: if head.val in d: d[head.val].append(lowercase ) else: a =[pos] a =head.next pos += 1 a =pos - 1 a =0 for v in d.values(): if len(lowercase ) % 2 != 0: middle += 1 else: a =0 for i in range(0 , len(lowercase ) ): if v[i] + v[len(lowercase ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
81
from collections import namedtuple

import requests
from lxml import html  # type: ignore

_A : Any = namedtuple('covid_data', 'cases deaths recovered')


def _a ( UpperCAmelCase = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    """simple docstring"""
    lowerCamelCase__ : Optional[Any] = '''//div[@class = "maincounter-number"]/span/text()'''
    return covid_data(*html.fromstring(requests.get(UpperCAmelCase ).content ).xpath(UpperCAmelCase ) )


_A : Dict = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
142
0
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers _lowercase : List[Any] = 'python tqdm regex requests packaging filelock numpy tokenizers'.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('dataclasses') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('importlib_metadata') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Dict=None ): require_version(deps[pkg] , snake_case_ )
86
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float ): if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float , ): if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float , ): if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( snake_case_ , nominal_annual_percentage_rate / 365 , number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
86
1
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, oder?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] snake_case_ = { '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''], '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''], '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''], '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''], } snake_case_ = F'''{src_lang}-{tgt_lang}''' snake_case_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''README.md''' ) print(F'''Generating {path}''' ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) # make sure we are under the root of the project lowerCAmelCase_ = Path(__file__).resolve().parent.parent.parent lowerCAmelCase_ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = model_name.split('''-''') lowerCAmelCase_ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
8
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), } } lowerCAmelCase_ = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None: snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def snake_case__( self : str ) ->List[Any]: return self.sp_model.get_piece_size() def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) ->Any: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple: return self.sp_model.piece_to_id(_UpperCamelCase ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]: snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase ) return token def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str: snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase ) snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 snake_case_ = [] snake_case_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) snake_case_ = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) ) else: snake_case_ = ''''''.join(_UpperCamelCase ) snake_case_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: snake_case_ = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,) def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
8
1
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: if height >= 1: move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: print("""moving disk from""" , _SCREAMING_SNAKE_CASE , """to""" , _SCREAMING_SNAKE_CASE ) def _a ( ) -> Dict: snake_case_ = int(input("""Height of hanoi: """ ).strip() ) move_tower(_SCREAMING_SNAKE_CASE , """A""" , """B""" , """C""" ) if __name__ == "__main__": main()
233
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __SCREAMING_SNAKE_CASE : Dict = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' __SCREAMING_SNAKE_CASE : Optional[int] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' __SCREAMING_SNAKE_CASE : Tuple = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[ """https://arxiv.org/abs/2102.01454""", """https://github.com/krishnap25/mauve""", ] , ) def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int="auto" , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Optional[Any]=0.9 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : Optional[int]=500 , UpperCAmelCase_ : Any="gpt2-large" , UpperCAmelCase_ : Union[str, Any]=-1 , UpperCAmelCase_ : Optional[Any]=1_024 , UpperCAmelCase_ : Dict=25 , UpperCAmelCase_ : Optional[Any]=5 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=25 , ) ->List[Any]: """simple docstring""" snake_case_ = compute_mauve( p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , ) return out
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): a__ : Dict = ViTImageProcessor if is_vision_available() else None @property def a ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def a ( self : Tuple ): __UpperCAmelCase = (3, 32, 1_28) __UpperCAmelCase = tempfile.mkdtemp() # fmt: off __UpperCAmelCase = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __UpperCAmelCase = dict(zip(_A , range(len(_A ) ) ) ) __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) __UpperCAmelCase = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 32, '''width''': 1_28}, } __UpperCAmelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def a ( self : Optional[Any] , **_lowercase : Any ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_A ) def a ( self : Union[str, Any] , **_lowercase : Union[str, Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def a ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def a ( self : List[str] ): __UpperCAmelCase = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta ) __UpperCAmelCase = Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) return image_input def a ( self : Dict ): __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = MgpstrProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_A ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def a ( self : Any ): __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = MgpstrProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __UpperCAmelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) __UpperCAmelCase = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) 
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __UpperCAmelCase = self.prepare_image_inputs() __UpperCAmelCase = image_processor(_A , return_tensors='''np''' ) __UpperCAmelCase = processor(images=_A , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __UpperCAmelCase = '''test''' __UpperCAmelCase = processor(text=_A ) __UpperCAmelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self : Optional[int] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __UpperCAmelCase = '''test''' __UpperCAmelCase = self.prepare_image_inputs() __UpperCAmelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def a ( self : List[Any] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __UpperCAmelCase = processor.char_decode(_A ) __UpperCAmelCase = tokenizer.batch_decode(_A ) __UpperCAmelCase = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok] self.assertListEqual(_A , _A ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __UpperCAmelCase = None __UpperCAmelCase = self.prepare_image_inputs() __UpperCAmelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def a ( self : Dict ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __UpperCAmelCase = torch.randn(1 , 27 , 38 ) __UpperCAmelCase = torch.randn(1 , 27 , 5_02_57 ) __UpperCAmelCase = torch.randn(1 , 27 , 3_05_22 ) __UpperCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
# Default content injected at the top of the documentation notebooks.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
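# A minimal, standalone sketch of the same deprecation-shim pattern
# (hypothetical class names; it does not require transformers): the old name
# subclasses the replacement and emits a FutureWarning on construction,
# exactly as the file above does.
import warnings


class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldFeatureExtractor(size=384)
assert any(issubclass(w.category, FutureWarning) for w in caught)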
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : int = StableDiffusionXLImgaImgPipeline A_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} A_ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} A_ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A_ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS A_ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCAmelCase ( self : Dict ) -> Optional[int]: torch.manual_seed(0 ) __magic_name__ : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=_A , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) __magic_name__ : str = EulerDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , ) torch.manual_seed(0 ) __magic_name__ : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __magic_name__ : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , ) __magic_name__ : Dict = CLIPTextModel(_A ) __magic_name__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_A ) __magic_name__ : Optional[Any] = CLIPTextModelWithProjection(_A ) __magic_name__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_A ) __magic_name__ : List[Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def __lowerCAmelCase ( self : List[Any] , _A : List[str] , _A : Any=0 ) -> Union[str, Any]: __magic_name__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Dict = image / 2 + 0.5 if str(_A ).startswith('mps' ): __magic_name__ : Any = torch.manual_seed(_A ) else: __magic_name__ : int = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 
'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: __magic_name__ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator __magic_name__ : str = self.get_dummy_components() __magic_name__ : Any = StableDiffusionXLImgaImgPipeline(**_A ) __magic_name__ : List[Any] = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) __magic_name__ : Any = self.get_dummy_inputs(_A ) __magic_name__ : Optional[int] = sd_pipe(**_A ).images __magic_name__ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __magic_name__ : Any = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def __lowerCAmelCase ( self : List[Any] ) -> int: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: pass def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Dict = self.get_dummy_components() __magic_name__ : Optional[Any] = StableDiffusionXLImgaImgPipeline(**_A ) __magic_name__ : List[Any] = sd_pipe.to(_A ) __magic_name__ : str = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) # forward without prompt embeds __magic_name__ : Union[str, Any] = self.get_dummy_inputs(_A ) __magic_name__ : Union[str, Any] = 3 * ['this is a negative prompt'] __magic_name__ : List[str] = negative_prompt __magic_name__ : int = 3 * [inputs['prompt']] __magic_name__ : Tuple = sd_pipe(**_A ) __magic_name__ : str = output.images[0, -3:, -3:, -1] # forward with prompt embeds __magic_name__ : Optional[Any] = self.get_dummy_inputs(_A ) __magic_name__ : Tuple = 3 * ['this is a negative prompt'] __magic_name__ : List[str] = 3 * [inputs.pop('prompt' )] ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : List[Any] = sd_pipe.encode_prompt(_A , negative_prompt=_A ) __magic_name__ : Tuple = sd_pipe( **_A , prompt_embeds=_A , negative_prompt_embeds=_A , pooled_prompt_embeds=_A , negative_pooled_prompt_embeds=_A , ) __magic_name__ : int = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : List[Any] ) -> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : str , _A : Optional[int] , _A : Optional[Any]="cpu" , _A : List[str]=torch.floataa , _A : Any=0 ) -> str: __magic_name__ : List[str] = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : Optional[Any] = np.random.RandomState(_A ).standard_normal((1, 4, 64, 64) ) __magic_name__ : Union[str, Any] = torch.from_numpy(_A ).to(device=_A , dtype=_A ) __magic_name__ : Optional[int] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: __magic_name__ : str = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __magic_name__ : Optional[int] = 
self.get_inputs(_A ) __magic_name__ : Union[str, Any] = pipe(**_A ).images __magic_name__ : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __magic_name__ : List[Any] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
"""simple docstring""" def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : list[int] ): '''simple docstring''' if not numbers: return 0 if not isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) or not all( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = numbers[0] for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): # update the maximum and minimum subarray products lowerCAmelCase = numbers[i] if number < 0: lowerCAmelCase , lowerCAmelCase = min_till_now, max_till_now lowerCAmelCase = max(SCREAMING_SNAKE_CASE , max_till_now * number ) lowerCAmelCase = min(SCREAMING_SNAKE_CASE , min_till_now * number ) # update the maximum product found till now lowerCAmelCase = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return max_prod
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class _UpperCAmelCase : '''simple docstring''' def __init__( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[Any]=3 , lowercase_ : List[str]=7 , lowercase_ : int=True , lowercase_ : int=True , lowercase_ : str=False , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=99 , lowercase_ : str=32 , lowercase_ : List[str]=5 , lowercase_ : Optional[int]=4 , lowercase_ : int=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[Any]=512 , lowercase_ : Dict=16 , lowercase_ : List[Any]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=3 , lowercase_ : Any=4 , lowercase_ : Union[str, Any]=None , ) -> Optional[int]: """simple docstring""" _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = scope def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]: """simple docstring""" _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self : Dict) -> Optional[Any]: """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__A , ) def __UpperCAmelCase ( self : int , lowercase_ : List[str] , 
lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = FalconModel(config=__A) model.to(__A) model.eval() _UpperCamelCase = model(__A , attention_mask=__A) _UpperCamelCase = model(__A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : str , lowercase_ : int , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , ) -> int: """simple docstring""" _UpperCamelCase = True _UpperCamelCase = FalconModel(__A) model.to(__A) model.eval() _UpperCamelCase = model( __A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , ) _UpperCamelCase = model( __A , attention_mask=__A , encoder_hidden_states=__A , ) _UpperCamelCase = model(__A , attention_mask=__A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __UpperCAmelCase ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Any , ) -> Dict: """simple docstring""" _UpperCamelCase = FalconForCausalLM(config=__A) model.to(__A) model.eval() _UpperCamelCase = model(__A , attention_mask=__A , labels=__A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Tuple , ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = True _UpperCamelCase = True _UpperCamelCase = FalconForCausalLM(config=__A) model.to(__A) model.eval() # first forward pass _UpperCamelCase = model( __A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , use_cache=__A , ) _UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size) _UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and _UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1) _UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1) _UpperCamelCase = model( __A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , output_hidden_states=__A , )["""hidden_states"""][0] _UpperCamelCase = model( __A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , past_key_values=__A , output_hidden_states=__A , )["""hidden_states"""][0] # select random slice _UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1]).item() _UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-3)) def __UpperCAmelCase ( self : Optional[Any]) -> List[str]: """simple docstring""" _UpperCamelCase = self.prepare_config_and_inputs() ( 
_UpperCamelCase ) = config_and_inputs _UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( A__, A__, A__, unittest.TestCase ): '''simple docstring''' __A = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) __A = (FalconForCausalLM,) if is_torch_available() else () __A = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) __A = False __A = False def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]: """simple docstring""" _UpperCamelCase = FalconModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__A , hidden_size=37) def __UpperCAmelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A) def __UpperCAmelCase ( self : List[Any]) -> Dict: """simple docstring""" _UpperCamelCase = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: _UpperCamelCase = alibi self.model_tester.create_and_check_model(__A , *__A) def __UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = 3 _UpperCamelCase = input_dict["""input_ids"""] _UpperCamelCase = input_ids.ne(1).to(__A) _UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) _UpperCamelCase = FalconForSequenceClassification(__A) model.to(__A) model.eval() _UpperCamelCase = model(__A , attention_mask=__A , labels=__A) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def __UpperCAmelCase ( self : Optional[Any]) -> str: """simple docstring""" _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = 3 _UpperCamelCase = """single_label_classification""" _UpperCamelCase = input_dict["""input_ids"""] _UpperCamelCase = input_ids.ne(1).to(__A) _UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) _UpperCamelCase = FalconForSequenceClassification(__A) model.to(__A) model.eval() _UpperCamelCase = model(__A , attention_mask=__A , labels=__A) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def __UpperCAmelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = input_dict["""input_ids"""] _UpperCamelCase = FalconForCausalLM(__A) model.to(__A) model.eval() _UpperCamelCase = model(__A , use_cache=__A) _UpperCamelCase = input_ids.shape[0] _UpperCamelCase = model._convert_to_rw_cache(result.past_key_values) _UpperCamelCase = model._convert_cache_to_standard_format(__A , __A) for layer in range(len(__A)): for tensor_idx in range(2): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4) self.assertTrue( 
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])) def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]: """simple docstring""" _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = 3 _UpperCamelCase = """multi_label_classification""" _UpperCamelCase = input_dict["""input_ids"""] _UpperCamelCase = input_ids.ne(1).to(__A) _UpperCamelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) _UpperCamelCase = FalconForSequenceClassification(__A) model.to(__A) model.eval() _UpperCamelCase = model(__A , attention_mask=__A , labels=__A) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def __UpperCAmelCase ( self : int) -> Tuple: """simple docstring""" for model_class in self.all_generative_model_classes: _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__A , "use_cache"): return _UpperCamelCase = model_class(__A).to(__A) if "use_cache" not in inputs: _UpperCamelCase = True _UpperCamelCase = model(**__A) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return _UpperCamelCase = ( getattr(__A , "decoder_layers" , __A) or getattr(__A , "num_decoder_layers" , __A) or config.num_hidden_layers ) _UpperCamelCase = getattr(__A , "num_kv_heads" , config.num_attention_heads) _UpperCamelCase = getattr(__A , "d_model" , config.hidden_size) _UpperCamelCase = embed_dim // num_attention_heads _UpperCamelCase = outputs["""past_key_values"""] self.assertEqual(len(__A) , __A) _UpperCamelCase = inputs["""input_ids"""].shape for i in range(__A): if config.new_decoder_architecture: _UpperCamelCase = config.num_attention_heads elif config.multi_query: _UpperCamelCase = 1 self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim)) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim)) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b") _UpperCamelCase = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b") model.eval() model.to(__A) _UpperCamelCase = tokenizer("My favorite food is" , return_tensors="pt").to(__A) _UpperCamelCase = ( """My favorite food is pizza. 
I love it so much that I have a pizza party every year for my birthday.""" ) _UpperCamelCase = model.generate(**__A , do_sample=__A , max_new_tokens=19) _UpperCamelCase = tokenizer.batch_decode(__A)[0] self.assertEqual(__A , __A) @slow def __UpperCAmelCase ( self : Dict) -> Optional[Any]: """simple docstring""" for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: _UpperCamelCase = AutoTokenizer.from_pretrained(__A) _UpperCamelCase = FalconForCausalLM.from_pretrained(__A) model.eval() model.to(__A) _UpperCamelCase = tokenizer("My favorite food is" , return_tensors="pt").to(__A) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**__A , do_sample=__A , max_new_tokens=4) model.generate(**__A , do_sample=__A , max_new_tokens=4) model.generate(**__A , num_beams=2 , max_new_tokens=4) @slow def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: _UpperCamelCase = AutoTokenizer.from_pretrained(__A) _UpperCamelCase = FalconForCausalLM.from_pretrained(__A) model.eval() model.to(device=__A) _UpperCamelCase = tokenizer("My favorite food is" , return_tensors="pt").to(__A) # Test results are the same with and without cache _UpperCamelCase = model.generate(**__A , do_sample=__A , max_new_tokens=20 , use_cache=__A) _UpperCamelCase = model.generate(**__A , do_sample=__A , max_new_tokens=20 , use_cache=__A) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
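# A pure-Python sketch of the token layout that build_inputs_with_special_tokens
# and create_token_type_ids_from_sequences implement above: XLNet appends the
# separator and classifier tokens at the END of the sequence, and the <cls>
# position gets its own segment id (2). Token strings stand in for ids here.
def xlnet_pair_layout(tokens_a: list[str], tokens_b: list[str]) -> tuple[list[str], list[int]]:
    tokens = tokens_a + ["<sep>"] + tokens_b + ["<sep>", "<cls>"]
    segment_ids = [0] * (len(tokens_a) + 1) + [1] * (len(tokens_b) + 1) + [2]
    return tokens, segment_ids


tokens, segments = xlnet_pair_layout(["hello"], ["world", "!"])
assert tokens == ["hello", "<sep>", "world", "!", "<sep>", "<cls>"]
assert segments == [0, 0, 1, 1, 1, 2]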
from jiwer import compute_measures

import datasets


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a
different length from the reference word sequence (supposedly the correct one). The WER is derived from the
Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for
comparing different systems as well as for evaluating improvements within one system. This kind of measurement,
however, provides no details on the nature of translation errors and further work is therefore required to identify
the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using
dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the
correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
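# A short usage sketch for the iterative branch of _compute above, assuming a
# jiwer version that still exposes compute_measures (as the metric's own
# import does); the expected 0.5 matches the docstring example.
from jiwer import compute_measures

references = ["this is the reference", "there is another one"]
predictions = ["this is the prediction", "there is an other sample"]

incorrect = total = 0
for prediction, reference in zip(predictions, references):
    measures = compute_measures(reference, prediction)
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]

print(incorrect / total)  # 0.5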
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase( ) -> List[str]: '''simple docstring''' UpperCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase_ ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs] return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ ) -> List[str]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ): """simple docstring""" UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase = json.load(lowerCamelCase_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self : str ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : str ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(lowerCamelCase_ ): try: UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCamelCase = get_pairs(lowerCamelCase_ ) UpperCamelCase = """ """.join(lowerCamelCase_ ) UpperCamelCase = word return word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , lowerCamelCase_ ): UpperCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ): """simple docstring""" return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = """""".join(lowerCamelCase_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCamelCase = 0 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ 
, pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
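# A small standalone sketch of the special padding rule implemented at the end
# of the tokenizer above (the helper name is illustrative): LED's
# `global_attention_mask` uses 1 for global tokens and 0 for local ones, so
# padded positions are filled with -1 ("not attended") rather than 0.
def pad_global_attention_mask(mask: list[int], target_len: int, padding_side: str = "right") -> list[int]:
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask


assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, padding_side="left") == [-1, -1, 1, 0, 0]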
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    # the generator's pre-conv corresponds to the original checkpoint's "input_conv"
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
280
1
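The HiFi-GAN conversion above copies `weight_g`/`weight_v` pairs, which only exist while weight normalization is applied to a layer. A small sketch of that mechanic on a toy `Conv1d`, assuming only PyTorch; the checkpoint dict and layer sizes here are made up for illustration.

import torch
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

# weight_norm reparameterizes `weight` as weight_g * weight_v / ||weight_v||,
# which is why HiFi-GAN checkpoints store weight_g / weight_v pairs.
conv = weight_norm(nn.Conv1d(80, 512, kernel_size=7))

# Copy (fake) checkpoint tensors into the reparameterized weights.
fake_checkpoint = {
    "input_conv.weight_g": torch.randn_like(conv.weight_g),
    "input_conv.weight_v": torch.randn_like(conv.weight_v),
    "input_conv.bias": torch.randn_like(conv.bias),
}
conv.weight_g.data = fake_checkpoint["input_conv.weight_g"]
conv.weight_v.data = fake_checkpoint["input_conv.weight_v"]
conv.bias.data = fake_checkpoint["input_conv.bias"]

# Folding the parameterization back leaves a plain `weight` tensor for inference.
remove_weight_norm(conv)
print(conv.weight.shape)  # torch.Size([512, 80, 7])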
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[int]=False ): """simple docstring""" a :Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ 
('''vqa_classifier.0.weight''', '''classifier.0.weight'''), ('''vqa_classifier.0.bias''', '''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ): """simple docstring""" for i in range(config.num_hidden_layers ): a :str = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a :List[Any] = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' ) a :Union[str, Any] = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a :Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] a :Dict = in_proj_bias[: config.hidden_size] a :Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a :Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a :str = in_proj_weight[ -config.hidden_size :, : ] a :Union[str, Any] = in_proj_bias[-config.hidden_size :] def __lowerCamelCase ( UpperCAmelCase_ : Dict ): """simple docstring""" a :List[str] = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any ): """simple docstring""" a :int = dct.pop(UpperCAmelCase_ ) a :Tuple = val @torch.no_grad() def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ): """simple docstring""" a :Optional[int] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=UpperCAmelCase_ ) a :Union[str, Any] = False a :str = False a :Union[str, Any] = False a :str = False if "vqa" in checkpoint_url: a :List[str] = True a :str = 3129 a :Optional[int] = '''huggingface/label-files''' a :Any = '''vqa2-id2label.json''' a :Optional[int] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) ) a :Any = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()} a :Optional[Any] = idalabel a :List[Any] = {v: k for k, v in idalabel.items()} a :Tuple = ViltForQuestionAnswering(UpperCAmelCase_ ) elif "nlvr" in checkpoint_url: a :Optional[int] = True a :List[str] = 2 a :Union[str, Any] = {0: '''False''', 1: '''True'''} a :List[Any] = {v: k for k, v in config.idalabel.items()} a :List[str] = 3 a :Any = ViltForImagesAndTextClassification(UpperCAmelCase_ ) elif "irtr" in checkpoint_url: a :Optional[int] = True a :List[Any] = ViltForImageAndTextRetrieval(UpperCAmelCase_ ) elif "mlm_itm" in checkpoint_url: a :Tuple = True a :Optional[int] = ViltForMaskedLM(UpperCAmelCase_ ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys a :Dict = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location='''cpu''' )['''state_dict'''] a :Dict = 
create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) for src, dest in rename_keys: rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ ) if mlm_model or irtr_model: a :str = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] for k in ignore_keys: state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ ) # load state dict into HuggingFace model model.eval() if mlm_model: a , a :List[Any] = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(UpperCAmelCase_ ) # Define processor a :Union[str, Any] = ViltImageProcessor(size=384 ) a :List[str] = BertTokenizer.from_pretrained('''bert-base-uncased''' ) a :List[str] = ViltProcessor(UpperCAmelCase_ , UpperCAmelCase_ ) # Forward pass on example inputs (image + text) if nlvr_model: a :Tuple = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=UpperCAmelCase_ ).raw ) a :Optional[int] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=UpperCAmelCase_ ).raw ) a :Any = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) a :List[Any] = processor(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='''pt''' ) a :Union[str, Any] = processor(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='''pt''' ) a :int = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: a :int = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=UpperCAmelCase_ ).raw ) if mlm_model: a :List[Any] = '''a bunch of [MASK] laying on a [MASK].''' else: a :List[Any] = '''How many cats are there?''' a :Optional[Any] = processor(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='''pt''' ) a :List[str] = model(**UpperCAmelCase_ ) # Verify outputs if mlm_model: a :Any = torch.Size([1, 11, 3_0522] ) a :List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) # verify masked token prediction equals "cats" a :Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: a :Tuple = torch.Size([1, 3129] ) a :List[str] = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) # verify vqa prediction equals "2" a :int = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: a :Tuple = torch.Size([1, 2] ) a :Optional[int] = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ ) print(F'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase_ ) processor.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": snake_case : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, 
help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) snake_case : List[str] = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
94
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the largest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
94
1
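The ViLT conversion sample above splits timm's fused `qkv` projection into separate query/key/value weights by row slices, in that order. A self-contained check of that slicing with tiny made-up dimensions:

import torch

hidden_size, seq = 4, 3  # tiny illustrative dimensions
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # timm-style fused projection
qkv_bias = torch.randn(3 * hidden_size)

# Slice the fused matrix into query / key / value, in that order.
q_w = qkv_weight[:hidden_size, :]
k_w = qkv_weight[hidden_size : 2 * hidden_size, :]
v_w = qkv_weight[-hidden_size:, :]
q_b = qkv_bias[:hidden_size]
k_b = qkv_bias[hidden_size : 2 * hidden_size]
v_b = qkv_bias[-hidden_size:]

x = torch.randn(seq, hidden_size)
# The three separate projections reproduce the fused one exactly.
fused = x @ qkv_weight.T + qkv_bias
split = torch.cat([x @ q_w.T + q_b, x @ k_w.T + k_b, x @ v_w.T + v_b], dim=-1)
assert torch.allclose(fused, split, atol=1e-6)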
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''], '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''], '''processing_whisper''': ['''WhisperProcessor'''], '''tokenization_whisper''': ['''WhisperTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['''WhisperTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WhisperForConditionalGeneration''', '''WhisperModel''', '''WhisperPreTrainedModel''', '''WhisperForAudioClassification''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWhisperForConditionalGeneration''', '''TFWhisperModel''', '''TFWhisperPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''FlaxWhisperForConditionalGeneration''', '''FlaxWhisperModel''', '''FlaxWhisperPreTrainedModel''', '''FlaxWhisperForAudioClassification''', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
369
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __snake_case : def __init__( self , lowercase , lowercase=13 , lowercase=10 , lowercase=3 , lowercase=2 , lowercase=2 , lowercase=2 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=0.9 , lowercase=None , ) -> Optional[Any]: '''simple docstring''' a__: int = parent a__: int = batch_size a__: int = image_size a__: Optional[int] = num_channels a__: List[str] = patch_size a__: List[str] = tubelet_size a__: Any = num_frames a__: Any = is_training a__: Dict = use_labels a__: Optional[Any] = hidden_size a__: Optional[int] = num_hidden_layers a__: Optional[Any] = num_attention_heads a__: Optional[Any] = intermediate_size a__: Any = hidden_act a__: Dict = hidden_dropout_prob a__: Union[str, Any] = attention_probs_dropout_prob a__: List[Any] = type_sequence_label_size a__: Optional[Any] = initializer_range a__: List[str] = mask_ratio a__: Union[str, Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame a__: Dict = (image_size // patch_size) ** 2 a__: Tuple = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos a__: Tuple = int(mask_ratio * self.seq_length) def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: List[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]) a__: Any = None if self.use_labels: a__: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__: Optional[int] = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> str: '''simple docstring''' a__: Any = VideoMAEModel(config=lowercase) model.to(lowercase) model.eval() a__: Optional[Any] = model(lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int: '''simple docstring''' a__: List[str] = VideoMAEForPreTraining(lowercase) model.to(lowercase) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch a__: int = torch.ones((self.num_masks,)) a__: Any = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) a__: int = mask.expand(self.batch_size , -1).bool() a__: Union[str, Any] = model(lowercase , lowercase) # model only returns predictions for masked patches a__: List[str] = mask.sum().item() a__: str = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels)) def lowerCamelCase_ ( self) -> int: '''simple docstring''' a__: Dict = self.prepare_config_and_inputs() a__ , a__ , a__: Dict = config_and_inputs a__: Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): a__ = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) a__ = ( {"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' a__: List[str] = VideoMAEModelTester(self) a__: str = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase=False) -> Any: '''simple docstring''' a__: Optional[int] = copy.deepcopy(lowercase) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch a__: List[Any] = torch.ones((self.model_tester.num_masks,)) a__: List[Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]) a__: Optional[int] = mask.expand(self.model_tester.batch_size , -1).bool() a__: Union[str, Any] = bool_masked_pos.to(lowercase) if return_labels: if model_class in [ *get_values(lowercase), ]: a__: str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase) return inputs_dict def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds') def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' pass def lowerCamelCase_ ( self) -> int: '''simple docstring''' a__ , a__: Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__: Union[str, Any] = model_class(lowercase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) a__: str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase , nn.Linear)) def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__ , a__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__: Any = model_class(lowercase) a__: int = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__: Optional[Any] = [*signature.parameters.keys()] a__: Tuple = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , lowercase) def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase) def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' a__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase) @slow def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__: int = VideoMAEModel.from_pretrained(lowercase) self.assertIsNotNone(lowercase) def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' if not self.has_attentions: pass else: a__ , a__: Any = self.model_tester.prepare_config_and_inputs_for_common() a__: str = True for model_class in self.all_model_classes: a__: Optional[int] = self.model_tester.seq_length - self.model_tester.num_masks a__: List[str] = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) a__: Tuple = True a__: str = False a__: Dict = True a__: List[Any] = model_class(lowercase) model.to(lowercase) model.eval() with torch.no_grad(): a__: int = model(**self._prepare_for_class(lowercase , lowercase)) a__: Any = outputs.attentions self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] a__: Tuple = True a__: List[Any] = model_class(lowercase) model.to(lowercase) model.eval() with torch.no_grad(): a__: str = model(**self._prepare_for_class(lowercase , lowercase)) a__: int = outputs.attentions self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) a__: Optional[Any] = len(lowercase) # Check attention is always last and order is fine a__: str = True a__: Dict = True a__: Tuple = model_class(lowercase) model.to(lowercase) model.eval() with torch.no_grad(): a__: Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase)) self.assertEqual(out_len + 1 , len(lowercase)) a__: int = outputs.attentions self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' def check_hidden_states_output(lowercase , lowercase , lowercase): a__: Union[str, Any] = model_class(lowercase) model.to(lowercase) model.eval() with torch.no_grad(): a__: Tuple = model(**self._prepare_for_class(lowercase , lowercase)) a__: Dict = outputs.hidden_states a__: Union[str, Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowercase) , lowercase) a__: Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks a__: Optional[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) a__ , a__: List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__: Dict = True check_hidden_states_output(lowercase , lowercase , lowercase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a__: List[Any] = True check_hidden_states_output(lowercase 
, lowercase , lowercase) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def lowerCamelCase_ ( self) -> int: '''simple docstring''' pass def __a ( ) ->List[Any]: a__: List[str] = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) a__: Dict = np.load(_SCREAMING_SNAKE_CASE ) return list(_SCREAMING_SNAKE_CASE ) @require_torch @require_vision class __snake_case ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self) -> str: '''simple docstring''' return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__: Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to( lowercase) a__: Dict = self.default_image_processor a__: str = prepare_video() a__: Tuple = image_processor(lowercase , return_tensors='pt').to(lowercase) # forward pass with torch.no_grad(): a__: List[Any] = model(**lowercase) # verify the logits a__: str = torch.Size((1, 4_00)) self.assertEqual(outputs.logits.shape , lowercase) a__: Optional[Any] = torch.tensor([0.3669, -0.0688, -0.2421]).to(lowercase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4)) @slow def lowerCamelCase_ ( self) -> Dict: '''simple docstring''' a__: Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(lowercase) a__: Optional[Any] = self.default_image_processor a__: List[Any] = prepare_video() a__: Union[str, Any] = image_processor(lowercase , return_tensors='pt').to(lowercase) # add boolean mask, indicating which patches to mask a__: Optional[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt') a__: Any = torch.load(lowercase) # forward pass with torch.no_grad(): a__: Any = model(**lowercase) # verify the logits a__: Union[str, Any] = torch.Size([1, 14_08, 15_36]) a__: Union[str, Any] = torch.tensor( [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=lowercase) self.assertEqual(outputs.logits.shape , lowercase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1e-4)) # verify the loss (`config.norm_pix_loss` = `True`) a__: Optional[int] = torch.tensor([0.5142] , device=lowercase) self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1e-4)) # verify the loss (`config.norm_pix_loss` = `False`) a__: int = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=lowercase).to( lowercase) with torch.no_grad(): a__: Union[str, Any] = model(**lowercase) a__: Optional[int] = torch.tensor(torch.tensor([0.6469]) , device=lowercase) self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1e-4))
203
0
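The VideoMAE tests above build one boolean mask and repeat it across the batch so every video has the same number of masked patches. A minimal sketch of that construction with illustrative sizes:

import torch

seq_length, num_masks, batch_size = 10, 6, 2  # illustrative sizes

# One fixed mask: the first `num_masks` positions are masked (True), the rest visible.
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()

assert bool_masked_pos.shape == (batch_size, seq_length)
assert int(bool_masked_pos[0].sum()) == num_masks  # same masked count per example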
import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _snake_case ( A__ , unittest.TestCase ): _lowercase : List[Any] = DDIMPipeline _lowercase : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _lowercase : str = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''latents''', '''callback''', '''callback_steps''', } _lowercase : Tuple = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS _lowercase : Tuple = False def SCREAMING_SNAKE_CASE__ ( self) -> int: torch.manual_seed(0) SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) SCREAMING_SNAKE_CASE = DDIMScheduler() SCREAMING_SNAKE_CASE = {'unet': unet, 'scheduler': scheduler} return components def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]: if str(a).startswith('mps'): SCREAMING_SNAKE_CASE = torch.manual_seed(a) else: SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a) SCREAMING_SNAKE_CASE = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = 'cpu' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = self.pipeline_class(**a) pipe.to(a) pipe.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a) SCREAMING_SNAKE_CASE = pipe(**a).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3)) SCREAMING_SNAKE_CASE = np.array( [1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04]) SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(a , 1E-3) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3) def SCREAMING_SNAKE_CASE__ ( self) -> str: super().test_save_load_local(expected_max_difference=3E-3) def SCREAMING_SNAKE_CASE__ ( self) -> Any: super().test_save_load_optional_components(expected_max_difference=3E-3) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=3E-3) @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = 'google/ddpm-cifar10-32' SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(a) SCREAMING_SNAKE_CASE = DDIMScheduler() SCREAMING_SNAKE_CASE = DDIMPipeline(unet=a , scheduler=a) ddim.to(a) ddim.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = torch.manual_seed(0) SCREAMING_SNAKE_CASE = ddim(generator=a , eta=0.0 , output_type='numpy').images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = 'google/ddpm-ema-bedroom-256' SCREAMING_SNAKE_CASE = 
UNetaDModel.from_pretrained(a) SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(a) SCREAMING_SNAKE_CASE = DDIMPipeline(unet=a , scheduler=a) ddpm.to(a) ddpm.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = torch.manual_seed(0) SCREAMING_SNAKE_CASE = ddpm(generator=a , output_type='numpy').images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
137
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ : List[Any] = logging.get_logger(__name__) a_ : str = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class _snake_case ( A__ ): _lowercase : Any = '''deberta-v2''' def __init__( self , a=12_8100 , a=1536 , a=24 , a=24 , a=6144 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=0 , a=0.02 , a=1E-7 , a=False , a=-1 , a=0 , a=True , a=None , a=0 , a="gelu" , **a , ) -> List[Any]: super().__init__(**a) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = relative_attention SCREAMING_SNAKE_CASE = max_relative_positions SCREAMING_SNAKE_CASE = pad_token_id SCREAMING_SNAKE_CASE = position_biased_input # Backwards compatibility if type(a) == str: SCREAMING_SNAKE_CASE = [x.strip() for x in pos_att_type.lower().split('|')] SCREAMING_SNAKE_CASE = pos_att_type SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = kwargs.get('pooler_hidden_size' , a) SCREAMING_SNAKE_CASE = pooler_dropout SCREAMING_SNAKE_CASE = pooler_hidden_act class _snake_case ( A__ ): @property def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'} else: SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'} if self._config.type_vocab_size > 0: return OrderedDict( [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)]) else: return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) @property def SCREAMING_SNAKE_CASE__ ( self) -> int: return 12 def SCREAMING_SNAKE_CASE__ ( self , a , a = -1 , a = -1 , a = -1 , a = False , a = None , a = 3 , a = 40 , a = 40 , a = None , ) -> Mapping[str, Any]: SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(preprocessor=a , framework=a) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
137
1
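The DDIM test above assembles a pipeline from dummy components and samples with a fixed generator. Below is a stripped-down sketch of the same flow using the public diffusers names (the sample's `UNetaDModel` is `UNet2DModel`); the tiny UNet configuration is copied from the test's dummy components, not a recommended model.

import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

# Tiny randomly initialized UNet, mirroring the dummy components in the test.
torch.manual_seed(0)
unet = UNet2DModel(
    block_out_channels=(32, 64),
    layers_per_block=2,
    sample_size=32,
    in_channels=3,
    out_channels=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
pipe.set_progress_bar_config(disable=True)

generator = torch.Generator().manual_seed(0)
images = pipe(
    batch_size=1, generator=generator, num_inference_steps=2, output_type="numpy"
).images
print(images.shape)  # (1, 32, 32, 3)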
"""simple docstring""" import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py a = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. a = direct_transformers_import(PATH_TO_TRANSFORMERS) a = transformers.models.auto.configuration_auto.CONFIG_MAPPING a = { # used to compute the property `self.chunk_length` '''EncodecConfig''': ['''overlap'''], # used as `self.bert_model = BertModel(config, ...)` '''DPRConfig''': True, # not used in modeling files, but it's an important information '''FSMTConfig''': ['''langs'''], # used internally in the configuration class file '''GPTNeoConfig''': ['''attention_types'''], # used internally in the configuration class file '''EsmConfig''': ['''is_folding_model'''], # used during training (despite we don't have training script for these models yet) '''Mask2FormerConfig''': ['''ignore_value'''], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) '''OneFormerConfig''': ['''ignore_value''', '''norm'''], # used during preprocessing and collation, see `collating_graphormer.py` '''GraphormerConfig''': ['''spatial_pos_max'''], # used internally in the configuration class file '''T5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally '''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], '''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], # used internally in the configuration class file '''LongT5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file '''SwitchTransformersConfig''': ['''feed_forward_proj'''], # having default values other than `1e-5` - we can't fix them without breaking '''BioGptConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''GLPNConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''SegformerConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''CvtConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''PerceiverConfig''': ['''layer_norm_eps'''], # used internally to calculate the feature size '''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate `mlp_dim` '''SamVisionConfig''': ['''mlp_ratio'''], # For (head) training, but so far not implemented '''ClapAudioConfig''': ['''num_classes'''], # Not used, but providing useful information to users '''SpeechT5HifiGanConfig''': ['''sampling_rate'''], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { '''CLIPSegConfig''': True, '''DeformableDetrConfig''': True, 
'''DetaConfig''': True, '''DinatConfig''': True, '''DonutSwinConfig''': True, '''EfficientFormerConfig''': True, '''FSMTConfig''': True, '''JukeboxConfig''': True, '''LayoutLMv2Config''': True, '''MaskFormerSwinConfig''': True, '''MT5Config''': True, '''NatConfig''': True, '''OneFormerConfig''': True, '''PerceiverConfig''': True, '''RagConfig''': True, '''SpeechT5Config''': True, '''SwinConfig''': True, '''Swin2SRConfig''': True, '''Swinv2Config''': True, '''SwitchTransformersConfig''': True, '''TableTransformerConfig''': True, '''TapasConfig''': True, '''TransfoXLConfig''': True, '''UniSpeechConfig''': True, '''UniSpeechSatConfig''': True, '''WavLMConfig''': True, '''WhisperConfig''': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) '''JukeboxPriorConfig''': True, # TODO: @Younes (for `is_decoder`) '''Pix2StructTextConfig''': True, } ) def _snake_case ( _snake_case : List[str] , _snake_case : str , _snake_case : Dict , _snake_case : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _A = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F'''config.{attribute}''' in modeling_source or F'''getattr(config, "{attribute}"''' in modeling_source or F'''getattr(self.config, "{attribute}"''' in modeling_source ): _A = True # Deal with multi-line cases elif ( re.search( RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , _snake_case , ) is not None ): _A = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: _A = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files _A = [ 'bos_index', 'eos_index', 'pad_index', 'unk_index', 'mask_index', 'image_size', 'use_cache', 'out_features', 'out_indices', ] _A = ['encoder_no_repeat_ngram_size'] # Special cases to be allowed _A = True if not attribute_used: _A = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: _A = True elif attribute in ["tie_word_embeddings"] and default_value is False: _A = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: _A = True elif attribute.endswith('_token_id' ): _A = True # configuration class specific cases if not case_allowed: _A = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) _A = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def _snake_case ( _snake_case : str ) -> Dict: '''simple docstring''' _A = dict(inspect.signature(config_class.__init__ ).parameters ) _A = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']] _A = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass _A = {} if len(config_class.attribute_map ) > 0: _A = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files _A = inspect.getsourcefile(_snake_case ) 
_A = os.path.dirname(_snake_case ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. _A = [os.path.join(_snake_case , _snake_case ) for fn in os.listdir(_snake_case ) if fn.startswith('modeling_' )] # Get the source code strings _A = [] for path in modeling_paths: if os.path.isfile(_snake_case ): with open(_snake_case ) as fp: modeling_sources.append(fp.read() ) _A = [] for config_param, default_value in zip(_snake_case , _snake_case ): # `attributes` here is all the variant names for `config_param` _A = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_snake_case , _snake_case , _snake_case , _snake_case ): unused_attributes.append(attributes[0] ) return sorted(_snake_case ) def _snake_case ( ) -> Optional[Any]: '''simple docstring''' _A = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) _A = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda _snake_case : inspect.isclass(_snake_case ) and issubclass(_snake_case , _snake_case ) and inspect.getmodule(_snake_case ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: _A = check_config_attributes_being_used(_snake_case ) if len(_snake_case ) > 0: _A = unused_attributes if len(_snake_case ) > 0: _A = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n' for name, attributes in configs_with_unused_attributes.items(): error += F'''{name}: {attributes}\n''' raise ValueError(_snake_case ) if __name__ == "__main__": check_config_attributes()
371
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def _snake_case ( _snake_case : int = 8 ) -> str: '''simple docstring''' _A = ascii_letters + digits + punctuation return "".join(secrets.choice(_snake_case ) for _ in range(_snake_case ) ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' i -= len(_snake_case ) _A = i // 3 _A = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) _A = ( chars_incl + random(_snake_case , quotient + remainder ) + random(_snake_case , _snake_case ) + random(_snake_case , _snake_case ) ) _A = list(_snake_case ) shuffle(_snake_case ) return "".join(_snake_case ) # random is a generalised function for letters, characters and numbers def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' return "".join(secrets.choice(_snake_case ) for _ in range(_snake_case ) ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] ) -> int: '''simple docstring''' pass # Put your code here... def _snake_case ( _snake_case : Any , _snake_case : str ) -> Dict: '''simple docstring''' pass # Put your code here... def _snake_case ( _snake_case : Union[str, Any] , _snake_case : int ) -> int: '''simple docstring''' pass # Put your code here... def _snake_case ( _snake_case : str , _snake_case : int = 8 ) -> bool: '''simple docstring''' if len(_snake_case ) < min_length: # Your Password must be at least 8 characters long return False _A = any(char in ascii_uppercase for char in password ) _A = any(char in ascii_lowercase for char in password ) _A = any(char in digits for char in password ) _A = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def _snake_case ( ) -> Optional[Any]: '''simple docstring''' _A = int(input('Please indicate the max length of your password: ' ).strip() ) _A = input( 'Please indicate the characters that must be in your password: ' ).strip() print('Password generated:' , password_generator(_snake_case ) ) print( 'Alternative Password generated:' , alternative_password_generator(_snake_case , _snake_case ) , ) print('[If you are thinking of using this passsword, You better save it.]' ) if __name__ == "__main__": main()
271
0
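The config-attribute checker above leans on `inspect.signature` to enumerate `__init__` parameters and their defaults while skipping `self` and `kwargs`. A small self-contained sketch of that step, with a hypothetical config class standing in for a real one:

import inspect


class ExampleConfig:  # hypothetical stand-in for a configuration class
    def __init__(self, hidden_size=768, num_layers=12, unused_knob=None, **kwargs):
        ...


signature = inspect.signature(ExampleConfig.__init__)
parameter_names = [p for p in signature.parameters if p not in ("self", "kwargs")]
defaults = {p: signature.parameters[p].default for p in parameter_names}
print(defaults)  # {'hidden_size': 768, 'num_layers': 12, 'unused_knob': None}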
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=36 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): '''simple docstring''' __A : Union[str, Any] = parent __A : int = batch_size __A : List[Any] = seq_length __A : Optional[int] = is_training __A : Dict = use_input_mask __A : Tuple = use_token_type_ids __A : Optional[int] = use_labels __A : Tuple = vocab_size __A : Dict = hidden_size __A : Any = num_hidden_layers __A : int = num_attention_heads __A : Optional[Any] = intermediate_size __A : List[str] = hidden_act __A : Union[str, Any] = hidden_dropout_prob __A : Optional[Any] = attention_probs_dropout_prob __A : List[Any] = max_position_embeddings __A : Tuple = type_vocab_size __A : Any = type_sequence_label_size __A : str = initializer_range __A : Optional[Any] = num_labels __A : List[Any] = num_choices __A : Union[str, Any] = scope def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __A : Tuple = None if self.use_input_mask: __A : Dict = random_attention_mask([self.batch_size, self.seq_length]) __A : Dict = None if self.use_token_type_ids: __A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __A : Tuple = None __A : Tuple = None __A : int = None if self.use_labels: __A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) __A : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __A : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices) __A : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = self.get_config() __A : Tuple = 300 return config def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' ( ( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) , ) : List[Any] = 
self.prepare_config_and_inputs() __A : List[str] = True __A : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) __A : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = MraModel(config=A_) model.to(A_) model.eval() __A : Any = model(A_ , attention_mask=A_ , token_type_ids=A_) __A : Optional[int] = model(A_ , token_type_ids=A_) __A : Union[str, Any] = model(A_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): '''simple docstring''' __A : Optional[int] = True __A : Tuple = MraModel(A_) model.to(A_) model.eval() __A : List[str] = model( A_ , attention_mask=A_ , token_type_ids=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) __A : int = model( A_ , attention_mask=A_ , token_type_ids=A_ , encoder_hidden_states=A_ , ) __A : Optional[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = MraForMaskedLM(config=A_) model.to(A_) model.eval() __A : List[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Dict = MraForQuestionAnswering(config=A_) model.to(A_) model.eval() __A : Tuple = model( A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Dict = self.num_labels __A : Dict = MraForSequenceClassification(A_) model.to(A_) model.eval() __A : Optional[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Optional[int] = self.num_labels __A : List[Any] = MraForTokenClassification(config=A_) model.to(A_) model.eval() __A : List[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = self.num_choices __A : List[Any] = MraForMultipleChoice(config=A_) model.to(A_) model.eval() __A : Optional[int] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __A : Union[str, Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __A : List[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __A : Any = model( A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = self.prepare_config_and_inputs() ( ( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) , ) : Union[str, Any] = config_and_inputs __A : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ): lowerCAmelCase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = () def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = MraModelTester(self) __A : Dict = ConfigTester(self , config_class=A_ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __A : Optional[int] = type self.model_tester.create_and_check_model(*A_) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A_) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A_) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A : List[str] = MraModel.from_pretrained(A_) self.assertIsNotNone(A_) @unittest.skip(reason='MRA does not output attentions') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return @require_torch class SCREAMING_SNAKE_CASE (unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = MraModel.from_pretrained('uw-madison/mra-base-512-4') __A : List[str] = torch.arange(256).unsqueeze(0) with torch.no_grad(): 
__A : Optional[int] = model(A_)[0] __A : Dict = torch.Size((1, 256, 768)) self.assertEqual(output.shape , A_) __A : str = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4') __A : str = torch.arange(256).unsqueeze(0) with torch.no_grad(): __A : List[str] = model(A_)[0] __A : Dict = 5_0265 __A : str = torch.Size((1, 256, vocab_size)) self.assertEqual(output.shape , A_) __A : Optional[Any] = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3') __A : Dict = torch.arange(4096).unsqueeze(0) with torch.no_grad(): __A : Optional[Any] = model(A_)[0] __A : str = 5_0265 __A : Union[str, Any] = torch.Size((1, 4096, vocab_size)) self.assertEqual(output.shape , A_) __A : int = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4))
190
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        # Append the collected fields as a new row (the original append target was
        # garbled in extraction; appending at len(index) is the assumed intent)
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
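# Illustrative sketch (values are hypothetical, not scraped): the Discount column in the
# record above is derived as (MRP - price) / MRP * 100, after stripping the rupee sign
# and thousands separators exactly as the try-block does.
mrp = float("₹1,000".strip("₹").replace(",", ""))
price = float("₹850".strip("₹").replace(",", ""))
discount = (mrp - price) / mrp * 100
assert abs(discount - 15.0) < 1e-9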
204
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCAmelCase_ ( __UpperCAmelCase: Tuple , __UpperCAmelCase: List[Any]=False , __UpperCAmelCase: int=False ) -> int: UpperCamelCase__ : Any = '''backbone.''' if is_semantic else '''''' UpperCamelCase__ : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ (f"{prefix}cls_token", '''beit.embeddings.cls_token'''), (f"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''), (f"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''), (f"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowerCAmelCase_ ( __UpperCAmelCase: Dict , __UpperCAmelCase: int , __UpperCAmelCase: Union[str, Any]=False , __UpperCAmelCase: str=False ) -> str: for i in range(config.num_hidden_layers ): UpperCamelCase__ : List[str] = '''backbone.''' if is_semantic else '''''' # queries, keys and values UpperCamelCase__ : Optional[Any] = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight" ) UpperCamelCase__ : Tuple = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias" ) UpperCamelCase__ : Any = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias" ) UpperCamelCase__ : Any = in_proj_weight[ : config.hidden_size, : ] UpperCamelCase__ : Any = q_bias UpperCamelCase__ : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ : Optional[Any] = in_proj_weight[ -config.hidden_size :, : ] 
UpperCamelCase__ : Union[str, Any] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCamelCase__ : List[str] = state_dict.pop(f"{prefix}blocks.{i}.gamma_1" ) UpperCamelCase__ : Union[str, Any] = state_dict.pop(f"{prefix}blocks.{i}.gamma_2" ) UpperCamelCase__ : Any = gamma_a UpperCamelCase__ : Tuple = gamma_a def lowerCAmelCase_ ( __UpperCAmelCase: Any , __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: List[Any] ) -> Optional[Any]: UpperCamelCase__ : Any = dct.pop(__UpperCAmelCase ) UpperCamelCase__ : Any = val def lowerCAmelCase_ ( ) -> Union[str, Any]: UpperCamelCase__ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCamelCase__ : Union[str, Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( __UpperCAmelCase: Dict , __UpperCAmelCase: Tuple , __UpperCAmelCase: List[str]=False ) -> Tuple: UpperCamelCase__ : Dict = False if '''rvlcdip''' in checkpoint_url else True UpperCamelCase__ : Any = BeitConfig(use_absolute_position_embeddings=__UpperCAmelCase , use_mask_token=__UpperCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCamelCase__ : Dict = 1024 UpperCamelCase__ : Dict = 4096 UpperCamelCase__ : Any = 24 UpperCamelCase__ : Any = 16 # labels if "rvlcdip" in checkpoint_url: UpperCamelCase__ : int = 16 UpperCamelCase__ : List[Any] = '''huggingface/label-files''' UpperCamelCase__ : Dict = '''rvlcdip-id2label.json''' UpperCamelCase__ : Tuple = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCamelCase__ : List[Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} UpperCamelCase__ : Dict = idalabel UpperCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='''cpu''' )['''model'''] UpperCamelCase__ : str = create_rename_keys(__UpperCAmelCase , has_lm_head=__UpperCAmelCase ) for src, dest in rename_keys: rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , has_lm_head=__UpperCAmelCase ) # load HuggingFace model UpperCamelCase__ : List[str] = BeitForMaskedImageModeling(__UpperCAmelCase ) if has_lm_head else BeitForImageClassification(__UpperCAmelCase ) model.eval() model.load_state_dict(__UpperCAmelCase ) # Check outputs on an image UpperCamelCase__ : Any = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCAmelCase ) UpperCamelCase__ : int = prepare_img() UpperCamelCase__ : Dict = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ) UpperCamelCase__ : Dict = encoding['''pixel_values'''] UpperCamelCase__ : List[str] = model(__UpperCAmelCase ) UpperCamelCase__ : List[str] = outputs.logits # verify logits UpperCamelCase__ : str = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(__UpperCAmelCase ), "Shape of logits not as expected" Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(__UpperCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: if has_lm_head: UpperCamelCase__ 
: Any = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: UpperCamelCase__ : Optional[Any] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth', type=str, help='URL to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', ) UpperCAmelCase_ = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
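# Illustrative sketch of the fused-QKV split performed in the conversion above: the
# original checkpoint stores one (3 * hidden, hidden) projection matrix, which is sliced
# into equal thirds for query, key, and value. hidden_size here is a hypothetical stand-in
# for config.hidden_size.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)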
247
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { 'configuration_xlm_roberta': [ 'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaConfig', 'XLMRobertaOnnxConfig', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['XLMRobertaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['XLMRobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaForCausalLM', 'XLMRobertaForMaskedLM', 'XLMRobertaForMultipleChoice', 'XLMRobertaForQuestionAnswering', 'XLMRobertaForSequenceClassification', 'XLMRobertaForTokenClassification', 'XLMRobertaModel', 'XLMRobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMRobertaForCausalLM', 'TFXLMRobertaForMaskedLM', 'TFXLMRobertaForMultipleChoice', 'TFXLMRobertaForQuestionAnswering', 'TFXLMRobertaForSequenceClassification', 'TFXLMRobertaForTokenClassification', 'TFXLMRobertaModel', 'TFXLMRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxXLMRobertaForMaskedLM', 'FlaxXLMRobertaForCausalLM', 'FlaxXLMRobertaForMultipleChoice', 'FlaxXLMRobertaForQuestionAnswering', 'FlaxXLMRobertaForSequenceClassification', 'FlaxXLMRobertaForTokenClassification', 'FlaxXLMRobertaModel', 'FlaxXLMRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass 
else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
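# Minimal sketch of the lazy-import pattern used above: `_import_structure` maps submodule
# names to the symbols they export, and the `_LazyModule` installed at the bottom defers
# the real imports until a symbol is first accessed. This toy stand-in (names hypothetical)
# shows the mechanism, not the transformers implementation:
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # reverse map: exported symbol -> submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        # only called for attributes not found normally, i.e. on first access
        module = importlib.import_module(self._symbol_to_module[attr])
        return getattr(module, attr)


# usage: lazy = LazyModule("toy", {"json": ["dumps"]}); lazy.dumps({"a": 1})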
247
1
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowercase__ = logging.get_logger(__name__) def _snake_case ( lowercase__ , lowercase__ ): try: with open(lowercase__ , 'rb' ) as flax_state_f: _lowerCamelCase : Union[str, Any] = from_bytes(lowercase__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(lowercase__ ) as f: if f.read().startswith('version' ): raise OSError( 'You seem to have cloned a repository without having git-lfs installed. Please' ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the' ' folder you cloned.' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'''Unable to convert {model_file} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(lowercase__ , lowercase__ ) def _snake_case ( lowercase__ , lowercase__ ): try: import torch # noqa: F401 except ImportError: logger.error( 'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights _lowerCamelCase : Union[str, Any] = flatten_dict(jax.tree_util.tree_map(lambda lowercase__ : x.dtype == jnp.bfloataa , lowercase__ ) ).values() if any(lowercase__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) _lowerCamelCase : Any = jax.tree_util.tree_map( lambda lowercase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowercase__ ) _lowerCamelCase : Tuple = '' _lowerCamelCase : Union[str, Any] = flatten_dict(lowercase__ , sep='.' ) _lowerCamelCase : List[Any] = pt_model.state_dict() # keep track of unexpected & missing keys _lowerCamelCase : str = [] _lowerCamelCase : List[str] = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _lowerCamelCase : int = flax_key_tuple.split('.' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: _lowerCamelCase : str = flax_key_tuple_array[:-1] + ['weight'] _lowerCamelCase : Optional[Any] = jnp.transpose(lowercase__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": _lowerCamelCase : List[Any] = flax_key_tuple_array[:-1] + ['weight'] _lowerCamelCase : Tuple = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": _lowerCamelCase : str = flax_key_tuple_array[:-1] + ['weight'] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(lowercase__ ): _lowerCamelCase : List[str] = ( flax_key_tuple_string.replace('_0' , '.0' ) .replace('_1' , '.1' ) .replace('_2' , '.2' ) .replace('_3' , '.3' ) .replace('_4' , '.4' ) .replace('_5' , '.5' ) .replace('_6' , '.6' ) .replace('_7' , '.7' ) .replace('_8' , '.8' ) .replace('_9' , '.9' ) ) _lowerCamelCase : int = '.'.join(lowercase__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict _lowerCamelCase : str = np.asarray(lowercase__ ) if not isinstance(lowercase__ , np.ndarray ) else flax_tensor _lowerCamelCase : List[str] = torch.from_numpy(lowercase__ ) # remove from missing keys missing_keys.remove(lowercase__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(lowercase__ ) pt_model.load_state_dict(lowercase__ ) # re-transform missing_keys to list _lowerCamelCase : Dict = list(lowercase__ ) if len(lowercase__ ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) if len(lowercase__ ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ' use it for predictions and inference.' ) return pt_model
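# Illustrative sketch of the weight-layout conversion performed above: Flax dense kernels
# are stored as (in_features, out_features) and must be transposed to PyTorch's (out, in);
# Flax conv kernels are (H, W, in_channels, out_channels) and become PyTorch's
# (out, in, H, W) via transpose(3, 2, 0, 1). Shapes below are hypothetical.
import numpy as np

dense_kernel = np.zeros((16, 32))  # Flax Dense kernel: (in, out)
pt_dense = dense_kernel.T  # PyTorch Linear.weight: (out, in)
assert pt_dense.shape == (32, 16)

conv_kernel = np.zeros((3, 3, 4, 8))  # Flax Conv kernel: (H, W, in, out)
pt_conv = np.transpose(conv_kernel, (3, 2, 0, 1))  # PyTorch Conv2d.weight: (out, in, H, W)
assert pt_conv.shape == (8, 4, 3, 3)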
96
"""simple docstring""" def _snake_case ( lowercase__ ): stooge(lowercase__ , 0 , len(lowercase__ ) - 1 ) return arr def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _lowerCamelCase, _lowerCamelCase : Optional[Any] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _lowerCamelCase : Union[str, Any] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase__ , i + t , (lowercase__) ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) if __name__ == "__main__": lowercase__ = input("""Enter numbers separated by a comma:\n""").strip() lowercase__ = [int(item) for item in user_input.split(""",""")] print(stooge_sort(unsorted))
96
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Tuple = '''altclip_text_model''' def __init__( self , UpperCamelCase__=25_0002 , UpperCamelCase__=1024 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=4096 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=514 , UpperCamelCase__=1 , UpperCamelCase__=0.02 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-05 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=768 , **UpperCamelCase__ , ) -> Optional[int]: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) snake_case : Any = vocab_size snake_case : List[Any] = hidden_size snake_case : Optional[int] = num_hidden_layers snake_case : Optional[Any] = num_attention_heads snake_case : Dict = hidden_act snake_case : Dict = intermediate_size snake_case : int = hidden_dropout_prob snake_case : Optional[int] = attention_probs_dropout_prob snake_case : Union[str, Any] = max_position_embeddings snake_case : Optional[int] = type_vocab_size snake_case : Dict = initializer_range snake_case : int = initializer_factor snake_case : Union[str, Any] = layer_norm_eps snake_case : List[Any] = position_embedding_type snake_case : Any = use_cache snake_case : str = project_dim class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Tuple = '''altclip_vision_model''' def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=3072 , UpperCamelCase__=512 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3 , UpperCamelCase__=224 , UpperCamelCase__=32 , UpperCamelCase__="quick_gelu" , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1.0 , **UpperCamelCase__ , ) -> str: '''simple docstring''' super().__init__(**UpperCamelCase__ ) snake_case : Optional[int] = hidden_size snake_case : str = intermediate_size snake_case : List[str] = projection_dim snake_case : Optional[Any] = num_hidden_layers snake_case : Optional[int] = num_attention_heads snake_case : str = num_channels snake_case : List[str] = patch_size snake_case : List[Any] = image_size snake_case : Union[str, Any] = initializer_range snake_case : Optional[Any] = initializer_factor snake_case : Any = attention_dropout snake_case : Dict = layer_norm_eps snake_case : List[str] = hidden_act @classmethod def lowerCamelCase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase__ ) snake_case ,snake_case : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("model_type" ) == "altclip": snake_case : Optional[Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : str = '''altclip''' __UpperCAmelCase : Optional[Any] = True def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=2.6592 , **UpperCamelCase__ ) -> Any: '''simple docstring''' snake_case : List[str] = kwargs.pop("text_config_dict" , UpperCamelCase__ ) snake_case : Union[str, Any] = kwargs.pop("vision_config_dict" , UpperCamelCase__ ) super().__init__(**UpperCamelCase__ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: snake_case : List[str] = {} # This is the complete result when using `text_config_dict`. snake_case : Dict = AltCLIPTextConfig(**UpperCamelCase__ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: snake_case : Optional[Any] = ( F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. ' F'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: snake_case : Any = ( F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ' F'value `text_config["{key}"]` will be overriden.' ) logger.warning(UpperCamelCase__ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: snake_case : Union[str, Any] = {} # This is the complete result when using `vision_config_dict`. snake_case : int = AltCLIPVisionConfig(**UpperCamelCase__ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: snake_case : Optional[int] = { str(UpperCamelCase__ ): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: snake_case : int = ( F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different ' F'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: snake_case : Optional[Any] = ( F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ' F'The value `vision_config["{key}"]` will be overriden.' ) logger.warning(UpperCamelCase__ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: snake_case : Optional[int] = {} logger.info("`text_config` is `None`. 
Initializing the `AltCLIPTextConfig` with default values." ) if vision_config is None: snake_case : Dict = {} logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." ) snake_case : Dict = AltCLIPTextConfig(**UpperCamelCase__ ) snake_case : Tuple = AltCLIPVisionConfig(**UpperCamelCase__ ) snake_case : int = projection_dim snake_case : List[str] = logit_scale_init_value snake_case : int = 1.0 @classmethod def lowerCamelCase ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ ) def lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' snake_case : Tuple = copy.deepcopy(self.__dict__ ) snake_case : Optional[int] = self.text_config.to_dict() snake_case : str = self.vision_config.to_dict() snake_case : Optional[int] = self.__class__.model_type return output
112
"""simple docstring""" import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ ) def lowerCamelCase ( self ) -> Any: '''simple docstring''' snake_case : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(UpperCamelCase__ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def lowerCamelCase ( self ) -> int: '''simple docstring''' snake_case : Optional[Any] = None ops.enable_eager_execution_internal() snake_case : str = tf.config.list_physical_devices("CPU" ) if len(UpperCamelCase__ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) snake_case : Optional[int] = tf.config.list_logical_devices(device_type="CPU" ) snake_case : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): snake_case : int = GradientAccumulator() snake_case : int = tf.Variable([4.0, 3.0] ) snake_case ,snake_case : Any = create_optimizer(5e-5 , 10 , 5 ) snake_case : Tuple = tf.Variable([0.0, 0.0] , trainable=UpperCamelCase__ ) def accumulate_on_replica(UpperCamelCase__ ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(UpperCamelCase__ , UpperCamelCase__ ): with strategy.scope(): snake_case : Union[str, Any] = strategy.experimental_local_results(UpperCamelCase__ ) local_variables[0].assign(UpperCamelCase__ ) local_variables[1].assign(UpperCamelCase__ ) strategy.run(UpperCamelCase__ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(UpperCamelCase__ ) def _check_local_values(UpperCamelCase__ , UpperCamelCase__ ): snake_case : List[Any] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , UpperCamelCase__ , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , UpperCamelCase__ , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
112
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ : str = logging.get_logger(__name__) lowerCAmelCase_ : Optional[int] = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='marian' __a =['past_key_values'] __a ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : List[str] , __a : List[str]=5_81_01 , __a : str=None , __a : int=10_24 , __a : Optional[Any]=12 , __a : int=40_96 , __a : List[str]=16 , __a : Optional[int]=12 , __a : str=40_96 , __a : Union[str, Any]=16 , __a : List[str]=0.0 , __a : str=0.0 , __a : Optional[int]=True , __a : Optional[int]=True , __a : List[str]="gelu" , __a : Dict=10_24 , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.0 , __a : Tuple=0.0 , __a : Any=0.02 , __a : str=5_81_00 , __a : int=False , __a : int=5_81_00 , __a : int=0 , __a : Dict=0 , __a : List[str]=True , **__a : Union[str, Any] , ): _a = vocab_size _a = decoder_vocab_size or vocab_size _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = encoder_layerdrop _a = decoder_layerdrop _a = use_cache _a = encoder_layers _a = scale_embedding # scale factor will be sqrt(d_model) if True _a = share_encoder_decoder_embeddings super().__init__( pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , **__a , ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def UpperCamelCase__ ( self : str ): if self.task in ["default", "seq2seq-lm"]: _a = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _a = {0: "batch"} _a = {0: "batch", 1: "past_decoder_sequence + sequence"} else: _a = {0: "batch", 1: "decoder_sequence"} _a = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__a , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
_a = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _a , _a = self.num_layers for i in range(__a ): _a = {0: "batch", 2: "past_sequence + sequence"} _a = {0: "batch", 2: "past_sequence + sequence"} else: _a = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def UpperCamelCase__ ( self : List[str] ): if self.task in ["default", "seq2seq-lm"]: _a = super().outputs else: _a = super(__a , self ).outputs if self.use_past: _a , _a = self.num_layers for i in range(__a ): _a = {0: "batch", 2: "past_sequence + sequence"} _a = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def UpperCamelCase__ ( self : Tuple , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ): _a = self._generate_dummy_inputs_for_encoder_and_decoder( __a , __a , __a , __a , __a ) # Generate decoder inputs _a = seq_length if not self.use_past else 1 _a = self._generate_dummy_inputs_for_encoder_and_decoder( __a , __a , __a , __a , __a ) _a = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} _a = dict(**__a , **__a ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _a , _a = common_inputs["input_ids"].shape _a = common_inputs["decoder_input_ids"].shape[1] _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = decoder_seq_length + 3 _a = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _a = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__a , __a )] , dim=1 ) _a = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _a , _a = self.num_layers _a = min(__a , __a ) _a = max(__a , __a ) - min_num_layers _a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__a ): common_inputs["past_key_values"].append( ( torch.zeros(__a ), torch.zeros(__a ), torch.zeros(__a ), torch.zeros(__a ), ) ) # TODO: test this. _a = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__a , __a ): common_inputs["past_key_values"].append((torch.zeros(__a ), torch.zeros(__a )) ) return common_inputs def UpperCamelCase__ ( self : Union[str, Any] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ): _a = self._generate_dummy_inputs_for_encoder_and_decoder( __a , __a , __a , __a , __a ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch _a , _a = common_inputs["input_ids"].shape # Not using the same length for past_key_values _a = seqlen + 2 _a , _a = self.num_layers _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = common_inputs["attention_mask"].dtype _a = torch.cat( [common_inputs["attention_mask"], torch.ones(__a , __a , dtype=__a )] , dim=1 ) _a = [ (torch.zeros(__a ), torch.zeros(__a )) for _ in range(__a ) ] return common_inputs def UpperCamelCase__ ( self : str , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _a = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _a = tokenizer.num_special_tokens_to_add(__a ) _a = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a ) # Generate dummy inputs according to compute batch and sequence _a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size _a = dict(tokenizer(__a , return_tensors=__a ) ) return common_inputs def UpperCamelCase__ ( self : Optional[int] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: _a = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a ) else: _a = self._generate_dummy_inputs_for_causal_lm( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a ) return common_inputs def UpperCamelCase__ ( self : Dict , __a : Dict , __a : Optional[int] , __a : str , __a : Tuple ): if self.task in ["default", "seq2seq-lm"]: _a = super()._flatten_past_key_values_(__a , __a , __a , __a ) else: _a = super(__a , self )._flatten_past_key_values_( __a , __a , __a , __a ) @property def UpperCamelCase__ ( self : Optional[Any] ): return 1e-4
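# Illustrative shape sketch for the dummy past_key_values built above: each layer caches a
# key tensor and a value tensor of shape (batch, num_heads, past_seq_len,
# d_model // num_heads). Using the Marian config defaults from this file (d_model=1024,
# 16 attention heads) with hypothetical batch/sequence sizes:
import torch

batch, num_heads, past_len, d_model = 2, 16, 7, 1024
shape = (batch, num_heads, past_len, d_model // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(6)]
assert past_key_values[0][0].shape == (2, 16, 7, 64)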
63
'''simple docstring'''


def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
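# Illustrative sketch: the function above returns the series terms as strings rather than
# summing them, and an empty input yields an empty list.
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []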
63
1
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J share a cell, hence 25 letters
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
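# Illustrative round-trip sketch for the cipher above (key and message are hypothetical).
# encode() first normalizes the input via prepare_input() -- uppercasing, dropping
# non-letters, and padding repeats/odd lengths with X -- so decoding recovers that
# normalized form rather than the raw text.
ciphertext = encode("Hide the gold", "SECRET")
assert decode(ciphertext, "SECRET") == prepare_input("Hide the gold")  # "HIDETHEGOLDX"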
366
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1" UpperCamelCase_ = "sshleifer/tiny-mbart" @require_torch class a_ ( _snake_case ): def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int: UpperCAmelCase_ = self.run_trainer( eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , ) UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history if not do_eval: return UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()] UpperCAmelCase_ = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats UpperCAmelCase_ = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase) assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def __a ( self :Dict) -> str: self.run_seqaseq_quick() @require_torch_multi_gpu def __a ( self :Any) -> int: self.run_seqaseq_quick(distributed=_lowercase) @require_torch_multi_gpu def __a ( self :int) -> Any: self.run_seqaseq_quick(distributed=_lowercase) @unittest.skip('''Requires an update of the env running those tests''') @require_torch_multi_gpu @require_fairscale def __a ( self :Tuple) -> Any: self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''') @unittest.skip('''Requires an update of the env running those tests''') @require_torch_multi_gpu @require_fairscale def __a ( self :Tuple) -> List[str]: self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''') @unittest.skip('''Requires an update of the env running those tests''') @require_torch_multi_gpu @require_fairscale def __a ( self :Union[str, Any]) -> Any: self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase) @unittest.skip('''Requires an update of the env running those tests''') @require_torch_multi_gpu @require_fairscale def __a ( self :int) -> Any: self.run_seqaseq_quick( distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase) @require_apex @require_torch_gpu def __a ( self :Tuple) -> str: # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''') # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''') @parameterized.expand(['''base''', '''low''', '''high''', '''mixed''']) @require_torch_multi_gpu def __a ( self :str , _lowercase :Any) -> List[str]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout UpperCAmelCase_ = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } UpperCAmelCase_ = experiments[experiment_id] UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} UpperCAmelCase_ = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str''']) UpperCAmelCase_ = len(re.findall(_lowercase , cl.err)) self.assertEqual(_lowercase , data['''n_matches''']) @slow def __a ( self :Any) -> Dict: UpperCAmelCase_ = self.run_trainer( eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , ) # Check metrics UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()] UpperCAmelCase_ = eval_metrics[0] UpperCAmelCase_ = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase) # test if do_predict saves generations and metrics UpperCAmelCase_ = os.listdir(_lowercase) UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def __a ( self :List[str]) -> str: from transformers.training_args import OptimizerNames def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]: UpperCAmelCase_ = '''--skip_memory_metrics 0''' UpperCAmelCase_ = self.run_trainer( max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , 
do_predict=_lowercase , n_gpus_to_use=1 , ) # Check metrics UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20) UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20) UpperCAmelCase_ = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value) UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings UpperCAmelCase_ = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( _lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( _lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( _lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}") def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]: UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n 
--num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split() UpperCAmelCase_ = ''' --do_predict '''.split() UpperCAmelCase_ = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: UpperCAmelCase_ = get_gpu_count() UpperCAmelCase_ = get_torch_dist_unique_port() UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() UpperCAmelCase_ = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_lowercase , env=self.get_env()) else: UpperCAmelCase_ = ['''run_translation.py'''] + args with patch.object(_lowercase , '''argv''' , _lowercase): main() return output_dir
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __lowercase : Optional[Any] = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif'''] class __lowercase ( _lowercase ): def __init__(self , A , A , A=None , A=1 ): lowerCamelCase_ : Optional[int] = tokenizer lowerCamelCase_ : Tuple = dataset lowerCamelCase_ : int = len(A ) if n_tasks is None else n_tasks lowerCamelCase_ : Union[str, Any] = n_copies def __iter__(self ): lowerCamelCase_ : List[str] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) lowerCamelCase_ : List[str] = self.tokenizer(A , padding=A , return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __lowercase ( _lowercase ): def __init__(self , A , A , A ): lowerCamelCase_ : Optional[Any] = start_length lowerCamelCase_ : Optional[Any] = eof_strings lowerCamelCase_ : List[str] = tokenizer def __call__(self , A , A , **A ): lowerCamelCase_ : Optional[int] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) lowerCamelCase_ : str = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(A ) def lowercase_ ( _lowercase ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ : Optional[int] = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase ) # last string should be "" return "".join(string_list[:-2] ) def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ) -> Dict: '''simple docstring''' lowerCamelCase_ : Tuple = defaultdict(_lowercase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowercase ) ): with torch.no_grad(): lowerCamelCase_ : Dict = batch['''ids'''].shape[-1] lowerCamelCase_ : Any = accelerator.unwrap_model(_lowercase ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase ) # each task is generated batch_size times lowerCamelCase_ : List[Any] = batch['''task_id'''].repeat(_lowercase ) lowerCamelCase_ : Optional[int] = accelerator.pad_across_processes( _lowercase , dim=1 , pad_index=tokenizer.pad_token_id ) lowerCamelCase_, lowerCamelCase_ : List[Any] = accelerator.gather((generated_tokens, generated_tasks) ) lowerCamelCase_ : int = generated_tokens.cpu().numpy() lowerCamelCase_ : Dict = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowercase , _lowercase ): gen_token_dict[task].append(_lowercase ) lowerCamelCase_ : List[Any] = [[] for _ in range(_lowercase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: lowerCamelCase_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) 
code_gens[task].append(remove_last_block(_lowercase ) ) return code_gens def lowercase_ ( ) -> str: '''simple docstring''' lowerCamelCase_ : Union[str, Any] = HfArgumentParser(_lowercase ) lowerCamelCase_ : Optional[int] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric lowerCamelCase_ : Dict = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing lowerCamelCase_ : Tuple = '''false''' if args.num_workers is None: lowerCamelCase_ : List[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate lowerCamelCase_ : Optional[int] = Accelerator() set_seed(args.seed , device_specific=_lowercase ) # Load model and tokenizer lowerCamelCase_ : int = AutoTokenizer.from_pretrained(args.model_ckpt ) lowerCamelCase_ : Optional[int] = tokenizer.eos_token lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings lowerCamelCase_ : List[Any] = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ), } # Load evaluation dataset and metric lowerCamelCase_ : Any = load_dataset('''openai_humaneval''' ) lowerCamelCase_ : Tuple = load_metric('''code_eval''' ) lowerCamelCase_ : Tuple = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) lowerCamelCase_ : Optional[int] = args.n_samples // args.batch_size lowerCamelCase_ : Union[str, Any] = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase ) # do not confuse args.batch_size, which is actually the num_return_sequences lowerCamelCase_ : Union[str, Any] = DataLoader(_lowercase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: lowerCamelCase_ : Tuple = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception lowerCamelCase_, lowerCamelCase_ : Tuple = accelerator.prepare(_lowercase , _lowercase ) lowerCamelCase_ : Union[str, Any] = complete_code( _lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , ) if accelerator.is_main_process: lowerCamelCase_ : str = [] for task in tqdm(range(_lowercase ) ): lowerCamelCase_ : Any = human_eval['''test'''][task]['''test'''] lowerCamelCase_ : Tuple = F"""check({human_eval["test"][task]["entry_point"]})""" references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric lowerCamelCase_, lowerCamelCase_ : str = code_eval_metric.compute( references=_lowercase , predictions=_lowercase , num_workers=args.num_workers ) print(F"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(_lowercase , _lowercase ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
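# For reference, a sketch of the unbiased pass@k estimator (from the Codex
# paper) that the `code_eval` metric reports: n is the number of samples
# generated per task and c the number that pass the unit tests. The n/c/k
# values below are made up for illustration.
from math import comb


def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased estimate of P(at least one of k samples drawn from n passes)."""
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)


print(pass_at_k(n=20, c=3, k=1))   # ~0.15
print(pass_at_k(n=20, c=3, k=10))  # ~0.89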
'''simple docstring'''

from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
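# A quick smoke test for the predicate above, using the substring-divisible
# pandigital 1406357289 given in the Project Euler problem statement.
digits = tuple(int(d) for d in "1406357289")
assert is_substring_divisible(digits)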
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging a :List[str] = logging.get_logger(__name__) a :Union[str, Any] = { 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json', # See all BART models at https://huggingface.co/models?filter=bart } class __a (_lowerCAmelCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[Any] = "bart" _SCREAMING_SNAKE_CASE :List[Any] = ["past_key_values"] _SCREAMING_SNAKE_CASE :Tuple = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , _a=50_265 , _a=1_024 , _a=12 , _a=4_096 , _a=16 , _a=12 , _a=4_096 , _a=16 , _a=0.0 , _a=0.0 , _a="gelu" , _a=1_024 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=0.0 , _a=False , _a=True , _a=3 , _a=1 , _a=0 , _a=2 , _a=True , _a=2 , _a=2 , **_a , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Tuple = d_model SCREAMING_SNAKE_CASE__ : Any = encoder_ffn_dim SCREAMING_SNAKE_CASE__ : List[str] = encoder_layers SCREAMING_SNAKE_CASE__ : List[str] = encoder_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = decoder_ffn_dim SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = dropout SCREAMING_SNAKE_CASE__ : Any = attention_dropout SCREAMING_SNAKE_CASE__ : Optional[Any] = activation_dropout SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_function SCREAMING_SNAKE_CASE__ : Any = init_std SCREAMING_SNAKE_CASE__ : int = encoder_layerdrop SCREAMING_SNAKE_CASE__ : Tuple = decoder_layerdrop SCREAMING_SNAKE_CASE__ : List[Any] = classifier_dropout SCREAMING_SNAKE_CASE__ : List[str] = use_cache SCREAMING_SNAKE_CASE__ : Dict = encoder_layers SCREAMING_SNAKE_CASE__ : str = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _lowercase ): SCREAMING_SNAKE_CASE__ : List[Any] = self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
''' """The config can simply be saved and uploaded again to be fixed.""" ) class __a (_lowerCAmelCase): '''simple docstring''' @property def _a ( self ) -> Dict: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE__ : int = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: SCREAMING_SNAKE_CASE__ : List[str] = {0: """batch"""} SCREAMING_SNAKE_CASE__ : List[str] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""} SCREAMING_SNAKE_CASE__ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(_lowercase , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. SCREAMING_SNAKE_CASE__ : Tuple = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.num_layers for i in range(_lowercase ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""} SCREAMING_SNAKE_CASE__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""} else: SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def _a ( self ) -> Union[str, Any]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE__ : str = super().outputs else: SCREAMING_SNAKE_CASE__ : int = super(_lowercase , self ).outputs if self.use_past: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.num_layers for i in range(_lowercase ): SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""} SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def _a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # Generate decoder inputs SCREAMING_SNAKE_CASE__ : Union[str, Any] = seq_length if not self.use_past else 1 SCREAMING_SNAKE_CASE__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ : Dict = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} SCREAMING_SNAKE_CASE__ : List[str] = dict(**_lowercase , **_lowercase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = common_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE__ : List[str] = common_inputs["""decoder_input_ids"""].shape[1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( batch, num_encoder_attention_heads, 
encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE__ : Tuple = decoder_seq_length + 3 SCREAMING_SNAKE_CASE__ : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) SCREAMING_SNAKE_CASE__ : List[str] = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(_lowercase , _lowercase )] , dim=1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.num_layers SCREAMING_SNAKE_CASE__ : List[str] = min(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ : Dict = max(_lowercase , _lowercase ) - min_num_layers SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(_lowercase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowercase ), torch.zeros(_lowercase ), torch.zeros(_lowercase ), torch.zeros(_lowercase ), ) ) # TODO: test this. SCREAMING_SNAKE_CASE__ : int = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(_lowercase , _lowercase ): common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) ) return common_inputs def _a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE__ : Dict = seqlen + 2 SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.num_layers SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_attention_heads SCREAMING_SNAKE_CASE__ : Any = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = common_inputs["""attention_mask"""].dtype SCREAMING_SNAKE_CASE__ : List[str] = torch.cat( [common_inputs["""attention_mask"""], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 ) SCREAMING_SNAKE_CASE__ : Any = [ (torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase ) ] return common_inputs def _a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = compute_effective_axis_dimension( _lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.num_special_tokens_to_add(_lowercase ) SCREAMING_SNAKE_CASE__ : str = compute_effective_axis_dimension( _lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase ) # Generate dummy inputs according to compute batch and sequence SCREAMING_SNAKE_CASE__ : Any = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size SCREAMING_SNAKE_CASE__ : Optional[int] = dict(tokenizer(_lowercase , 
return_tensors=_lowercase ) ) return common_inputs def _a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> List[Any]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE__ : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase ) elif self.task == "causal-lm": SCREAMING_SNAKE_CASE__ : Tuple = self._generate_dummy_inputs_for_causal_lm( _lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase ) else: SCREAMING_SNAKE_CASE__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase ) return common_inputs def _a ( self , _a , _a , _a , _a ) -> Optional[int]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE__ : List[str] = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase ) else: SCREAMING_SNAKE_CASE__ : str = super(_lowercase , self )._flatten_past_key_values_( _lowercase , _lowercase , _lowercase , _lowercase )
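# A standalone sketch (illustrative sizes, not read from a real config) of
# the past_key_values shape rule used above: each cached key/value tensor is
# (batch, num_heads, past_sequence_length, d_model // num_heads), with one
# zero-filled (key, value) pair per decoder layer.
import torch

batch, num_heads, d_model, past_len = 2, 16, 1024, 7
head_dim = d_model // num_heads  # 64

num_layers = 12
past_key_values = [
    (
        torch.zeros(batch, num_heads, past_len, head_dim),
        torch.zeros(batch, num_heads, past_len, head_dim),
    )
    for _ in range(num_layers)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 7, 64])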
"""simple docstring""" from math import loga def _lowercase ( __lowerCAmelCase ) -> int: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Input value must be a 'int' type""" ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" def __init__( self , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ): """simple docstring""" super().__init__(*snake_case__ , **snake_case__ ) lowerCAmelCase : Tuple = eval_examples lowerCAmelCase : int = post_process_function def lowercase__ ( self , snake_case__ = None , snake_case__=None , snake_case__ = None , snake_case__ = "eval" , **snake_case__ , ): """simple docstring""" lowerCAmelCase : List[Any] = gen_kwargs.copy() lowerCAmelCase : Dict = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) lowerCAmelCase : str = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) lowerCAmelCase : List[str] = gen_kwargs lowerCAmelCase : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset lowerCAmelCase : Dict = self.get_eval_dataloader(snake_case__ ) lowerCAmelCase : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase : Any = self.compute_metrics lowerCAmelCase : Dict = None lowerCAmelCase : Optional[int] = time.time() lowerCAmelCase : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowerCAmelCase : List[Any] = eval_loop( snake_case__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , ) finally: lowerCAmelCase : Dict = compute_metrics lowerCAmelCase : List[str] = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCAmelCase : Optional[int] = self.post_process_function(snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase : Union[str, Any] = self.compute_metrics(snake_case__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): lowerCAmelCase : str = metrics.pop(snake_case__ ) metrics.update(output.metrics ) else: lowerCAmelCase : int = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(snake_case__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) lowerCAmelCase : Dict = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case__ ) return metrics def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__ = "test" , **snake_case__ ): """simple docstring""" lowerCAmelCase : Union[str, Any] = gen_kwargs.copy() lowerCAmelCase : Tuple = self.get_test_dataloader(snake_case__ ) # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase : List[Any] = self.compute_metrics lowerCAmelCase : Optional[int] = None lowerCAmelCase : Any = time.time() lowerCAmelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowerCAmelCase : str = eval_loop( snake_case__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , ) finally: lowerCAmelCase : Optional[Any] = compute_metrics lowerCAmelCase : int = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCAmelCase : Any = self.post_process_function(snake_case__ , snake_case__ , snake_case__ , "predict" ) lowerCAmelCase : Union[str, Any] = self.compute_metrics(snake_case__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): lowerCAmelCase : Optional[int] = metrics.pop(snake_case__ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case__ )
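# A small pure-Python sketch (no Trainer required) of the metric-key
# prefixing convention both methods above apply before logging, so that
# "eval" and "test" runs can be reported side by side.
def prefix_metrics(metrics: dict, metric_key_prefix: str = "eval") -> dict:
    # Keys that already carry the prefix are left alone; everything else gets it.
    return {
        key if key.startswith(f"{metric_key_prefix}_") else f"{metric_key_prefix}_{key}": value
        for key, value in metrics.items()
    }


print(prefix_metrics({"bleu": 27.3, "eval_loss": 1.9}))
# {'eval_bleu': 27.3, 'eval_loss': 1.9}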
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __UpperCAmelCase : Optional[int] = trt.Logger(trt.Logger.WARNING) __UpperCAmelCase : Tuple = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __UpperCAmelCase : Optional[Any] = logging.getLogger(__name__) __UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--onnx_model_path", default=None, type=str, required=True, help="Path to ONNX model: ", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--tokenizer_name", default="", type=str, required=True, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--dataset_name", type=str, default=None, required=True, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data." 
) parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision instead of 32-bit", ) parser.add_argument( "--int8", action="store_true", help="Whether to use INT8", ) __UpperCAmelCase : Tuple = parser.parse_args() if args.tokenizer_name: __UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) logger.info("Training/evaluation parameters %s", args) __UpperCAmelCase : Optional[Any] = args.per_device_eval_batch_size __UpperCAmelCase : Dict = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : str = "temp_engine/bert-fp32.engine" if args.fpaa: __UpperCAmelCase : Tuple = "temp_engine/bert-fp16.engine" if args.inta: __UpperCAmelCase : List[Any] = "temp_engine/bert-int8.engine" # import ONNX file if not os.path.exists("temp_engine"): os.makedirs("temp_engine") __UpperCAmelCase : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, "rb") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __UpperCAmelCase : int = [network.get_input(i) for i in range(network.num_inputs)] __UpperCAmelCase : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __UpperCAmelCase : Optional[Any] = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __UpperCAmelCase : Any = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __UpperCAmelCase : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, "wb") as f: f.write(engine.serialize()) def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str: __snake_case: Tuple = np.asarray(inputs["""input_ids"""] , dtype=np.intaa) __snake_case: Union[str, Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa) __snake_case: List[str] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , SCREAMING_SNAKE_CASE__) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , SCREAMING_SNAKE_CASE__) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , SCREAMING_SNAKE_CASE__) # start time __snake_case: int = time.time() # Run inference context.execute_async( bindings=[int(SCREAMING_SNAKE_CASE__) for d_inp in d_inputs] + [int(SCREAMING_SNAKE_CASE__), int(SCREAMING_SNAKE_CASE__)] , stream_handle=stream.handle) # Transfer predictions 
back from GPU cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) # Synchronize the stream and take time stream.synchronize() # end time __snake_case: Optional[Any] = time.time() __snake_case: Dict = end_time - start_time __snake_case: Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __UpperCAmelCase : Union[str, Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __UpperCAmelCase : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("Evaluation requires a dataset name") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __UpperCAmelCase : str = raw_datasets["validation"].column_names __UpperCAmelCase : Dict = "question" if "question" in column_names else column_names[0] __UpperCAmelCase : List[Any] = "context" if "context" in column_names else column_names[1] __UpperCAmelCase : List[str] = "answers" if "answers" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __UpperCAmelCase : List[str] = tokenizer.padding_side == "right" if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __UpperCAmelCase : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length) def A__ ( SCREAMING_SNAKE_CASE__) -> Optional[int]: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). 
So we remove that # left whitespace __snake_case: Optional[int] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __snake_case: List[str] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=SCREAMING_SNAKE_CASE__ , stride=args.doc_stride , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __snake_case: Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __snake_case: int = [] for i in range(len(tokenized_examples["""input_ids"""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __snake_case: int = tokenized_examples.sequence_ids(SCREAMING_SNAKE_CASE__) __snake_case: List[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __snake_case: Any = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. __snake_case: Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i]) ] return tokenized_examples __UpperCAmelCase : int = raw_datasets["validation"] # Validation Feature Creation __UpperCAmelCase : Dict = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on validation dataset", ) __UpperCAmelCase : Dict = default_data_collator __UpperCAmelCase : List[Any] = eval_dataset.remove_columns(["example_id", "offset_mapping"]) __UpperCAmelCase : str = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="eval") -> Optional[int]: # Post-processing: we match the start logits and end logits to answers in the original context. __snake_case: Optional[Any] = postprocess_qa_predictions( examples=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE__ , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: __snake_case: Tuple = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __snake_case: str = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __snake_case: Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__) __UpperCAmelCase : List[str] = load_metric("squad_v2" if args.version_2_with_negative else "squad") # Evaluation! logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path) with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]: return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE__)) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE__).itemsize # Allocate device memory for inputs and outputs. __UpperCAmelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __UpperCAmelCase : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __UpperCAmelCase : Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __UpperCAmelCase : Union[str, Any] = cuda.mem_alloc(h_outputa.nbytes) __UpperCAmelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
__UpperCAmelCase : Optional[int] = cuda.Stream() # Evaluation logger.info("***** Running Evaluation *****") logger.info(f' Num examples = {len(eval_dataset)}') logger.info(f' Batch size = {args.per_device_eval_batch_size}') __UpperCAmelCase : Optional[Any] = 0.0 __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : Any = timeit.default_timer() __UpperCAmelCase : Union[str, Any] = None for step, batch in enumerate(eval_dataloader): __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __UpperCAmelCase , __UpperCAmelCase : str = outputs __UpperCAmelCase : Any = torch.tensor(start_logits) __UpperCAmelCase : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __UpperCAmelCase : Optional[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __UpperCAmelCase : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __UpperCAmelCase : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __UpperCAmelCase : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __UpperCAmelCase : Union[str, Any] = nested_truncate(all_preds, len(eval_dataset)) __UpperCAmelCase : List[str] = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_000 / niter)) logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_000)) logger.info("Total Number of Inference = %d", niter) __UpperCAmelCase : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds) __UpperCAmelCase : Optional[int] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'Evaluation metrics: {eval_metric}')
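# A CPU-only sketch (made-up shapes) of the pad-then-concat pattern used in
# the loop above: logits gathered from different steps/ranks can disagree in
# sequence length, so each tensor is right-padded with -100 before
# concatenation, mirroring pad_across_processes + nested_concat.
import torch


def pad_to(logits: torch.Tensor, length: int, pad_index: int = -100) -> torch.Tensor:
    out = torch.full((logits.shape[0], length), float(pad_index))
    out[:, : logits.shape[1]] = logits
    return out


a = torch.randn(2, 5)  # logits from one step/rank
b = torch.randn(2, 3)  # shorter logits from another
max_len = max(a.shape[1], b.shape[1])
gathered = torch.cat([pad_to(a, max_len), pad_to(b, max_len)], dim=0)
print(gathered.shape)  # torch.Size([4, 5])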
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler UpperCamelCase__ = 1_6 UpperCamelCase__ = 3_2 def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 16 , lowerCAmelCase__ = "bert-base-cased" ) -> Union[str, Any]: UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(lowerCAmelCase__ ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase__ : Union[str, Any] = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowerCAmelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' ) return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. UpperCAmelCase__ : Dict = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) return train_dataloader, eval_dataloader def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: model.eval() UpperCAmelCase__ : Union[str, Any] = 0 for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : Tuple = model(**lowerCAmelCase__ ) UpperCAmelCase__ : str = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowerCAmelCase__ ) - 1: UpperCAmelCase__ : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase__ : Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , ) UpperCAmelCase__ : Dict = metric.compute() return eval_metric["accuracy"] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: # Initialize accelerator UpperCAmelCase__ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ : Dict = config['''lr'''] UpperCAmelCase__ : Union[str, Any] = int(config['''num_epochs'''] ) UpperCAmelCase__ : int = int(config['''seed'''] ) UpperCAmelCase__ : Optional[Any] = int(config['''batch_size'''] ) UpperCAmelCase__ : int = args.model_name_or_path set_seed(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ : Any = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ : str = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) # Instantiate optimizer UpperCAmelCase__ : Optional[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase__ : List[str] = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase__ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase__ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: UpperCAmelCase__ : Optional[Any] = 1 UpperCAmelCase__ : Any = (len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase__ : str = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase__ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase__ , ) else: UpperCAmelCase__ : Any = DummyScheduler(lowerCAmelCase__ , total_num_steps=lowerCAmelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = accelerator.prepare( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase__ : Dict = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase__ : Any = 0 UpperCAmelCase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' ) UpperCAmelCase__ : List[Any] = num_epochs if args.partial_train_epoch is not None: UpperCAmelCase__ : List[str] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase__ : Optional[Any] = args.resume_from_checkpoint.split('''epoch_''' )[1] UpperCAmelCase__ : Optional[int] = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCAmelCase__ : Any = int(lowerCAmelCase__ ) + 1 UpperCAmelCase__ : List[Any] = evaluation_loop(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) accelerator.print('''resumed checkpoint performance:''' , lowerCAmelCase__ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f: UpperCAmelCase__ : Tuple = json.load(lowerCAmelCase__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCAmelCase__ : List[Any] = {} for epoch in range(lowerCAmelCase__ , lowerCAmelCase__ ): model.train() for step, batch in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ : int = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = outputs.loss UpperCAmelCase__ : Tuple = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCAmelCase__ : List[Any] = F"""epoch_{epoch}""" UpperCAmelCase__ : Dict = os.path.join(args.output_dir , lowerCAmelCase__ ) accelerator.save_state(lowerCAmelCase__ ) UpperCAmelCase__ : Any = evaluation_loop(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = accuracy UpperCAmelCase__ : int = lr_scheduler.get_lr()[0] UpperCAmelCase__ : List[str] = optimizer.param_groups[0]['''lr'''] UpperCAmelCase__ : str = epoch UpperCAmelCase__ : List[str] = overall_step accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , '''w''' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) def a__ ( ) -> Tuple: UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=lowerCAmelCase__ , default='''bert-base-cased''' , 
help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase__ , ) parser.add_argument( '''--output_dir''' , type=lowerCAmelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=lowerCAmelCase__ , default=2 , help='''Number of train epochs.''' , ) UpperCAmelCase__ : Optional[int] = parser.parse_args() UpperCAmelCase__ : Optional[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": main()
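# A minimal sketch of the resume bookkeeping above: the epoch index is
# recovered from the checkpoint folder name written by accelerator.save_state
# and training restarts at the next epoch. The path below is illustrative.
def epoch_from_checkpoint(path: str) -> int:
    # Mirrors the char-by-char digit scan above; stops at the first non-digit.
    suffix = path.split("epoch_")[1]
    digits = ""
    for char in suffix:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits)


resume_path = "output/epoch_7"
print(epoch_from_checkpoint(resume_path) + 1)  # 8 -> training resumes at epoch 8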
'''simple docstring''' from collections.abc import Iterable from typing import Any class lowerCamelCase_ : def __init__( self : List[Any] , _A : int | None = None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = value UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier UpperCAmelCase__ : Node | None = None UpperCAmelCase__ : Node | None = None def __repr__( self : Optional[Any] ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 ) class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Node | None = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = root def __str__( self : Union[str, Any] ): '''simple docstring''' return str(self.root ) def lowercase_ ( self : str , _A : Node , _A : Node | None ): '''simple docstring''' if new_children is not None: # reset its kids UpperCAmelCase__ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(_A ): # If it is the right children UpperCAmelCase__ : str = new_children else: UpperCAmelCase__ : Optional[int] = new_children else: UpperCAmelCase__ : Union[str, Any] = new_children def lowercase_ ( self : Union[str, Any] , _A : Node ): '''simple docstring''' if node.parent and node.parent.right: return node == node.parent.right return False def lowercase_ ( self : int ): '''simple docstring''' return self.root is None def lowercase_ ( self : List[str] , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = Node(_A ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase__ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase__ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase__ : Optional[Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase__ : Any = parent_node.left else: if parent_node.right is None: UpperCAmelCase__ : str = new_node break else: UpperCAmelCase__ : List[str] = parent_node.right UpperCAmelCase__ : Tuple = parent_node def lowercase_ ( self : Optional[Any] , *_A : Tuple ): '''simple docstring''' for value in values: self.__insert(_A ) def lowercase_ ( self : Union[str, Any] , _A : int ): '''simple docstring''' if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase__ : List[Any] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase__ : str = node.left if value < node.value else node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: if self.root is None: return None UpperCAmelCase__ : int = self.root if not self.empty(): while node.right is not None: UpperCAmelCase__ : Tuple = node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: UpperCAmelCase__ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase__ : Optional[int] = self.root while node.left is not None: UpperCAmelCase__ : Tuple = node.left return node def lowercase_ ( self : List[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.search(_A ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_A , _A ) elif node.left is None: # Has only right children self.__reassign_nodes(_A , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_A , node.left ) else: UpperCAmelCase__ : Union[str, Any] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase__ : Optional[Any] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def lowercase_ ( self : List[str] , _A : Node | None ): '''simple docstring''' if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def lowercase_ ( self : str , _A : Any=None ): '''simple docstring''' if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def lowercase_ ( self : Dict , _A : list , _A : Node | None ): '''simple docstring''' if node: self.inorder(_A , node.left ) arr.append(node.value ) self.inorder(_A , node.right ) def lowercase_ ( self : Optional[Any] , _A : int , _A : Node ): '''simple docstring''' UpperCAmelCase__ : list[int] = [] self.inorder(_A , _A ) # append all values to list using inorder traversal return arr[k - 1] def a__ ( lowerCAmelCase__ ) -> list[Node]: UpperCAmelCase__ : Union[str, Any] = [] if curr_node is not None: UpperCAmelCase__ : str = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def a__ ( ) -> None: UpperCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase__ : str = BinarySearchTree() for i in testlist: t.insert(lowerCAmelCase__ ) # Prints all the elements of the list in order traversal print(lowerCAmelCase__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' , t.get_max().value ) # type: ignore print('''Min Value: ''' , t.get_min().value ) # type: ignore for i in testlist: t.remove(lowerCAmelCase__ ) print(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
299
1
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowercase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = inspect.getfile(accelerate.test_utils ) UpperCamelCase__ :Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) UpperCamelCase__ :Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) UpperCamelCase__ :List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def lowerCAmelCase__ ( self ): '''simple docstring''' print(F'''Found {torch.cuda.device_count()} devices.''' ) UpperCamelCase__ :List[str] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase__ ( self ): '''simple docstring''' print(F'''Found {torch.cuda.device_count()} devices.''' ) UpperCamelCase__ :Tuple = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase__ ( self ): '''simple docstring''' print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) UpperCamelCase__ :Tuple = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) if __name__ == "__main__": __snake_case = Accelerator() __snake_case = (accelerator.state.process_index + 2, 10) __snake_case = torch.randint(0, 10, shape).to(accelerator.device) __snake_case = '''''' __snake_case = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __snake_case = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __snake_case = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." 
# Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
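The assertions above pin down what `pad_across_processes` promises: each process's tensor is padded along dim 0 to the common size, with zeros on the right (or on the left when `pad_first=True`). A pure-PyTorch sketch of the right-padding case, independent of accelerate (`pad_to_length` is a hypothetical helper):

import torch


def pad_to_length(t: torch.Tensor, length: int, pad_value: int = 0) -> torch.Tensor:
    # Right-pad dim 0 of `t` with `pad_value` up to `length`, mirroring what
    # each process does to its tensor before the gather.
    padded = t.new_full((length, *t.shape[1:]), pad_value)
    padded[: t.shape[0]] = t
    return padded


t = torch.randint(0, 10, (3, 5))
p = pad_to_length(t, 6)
assert p.shape == (6, 5)
assert torch.equal(p[:3], t) and torch.all(p[3:] == 0)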
97
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a ( __a , __a ) -> Optional[int]: '''simple docstring''' assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def a ( __a , __a , __a ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache''' UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_json_dataset(__a , __a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def a ( __a , __a , __a ) -> Any: '''simple docstring''' UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache''' UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features UpperCamelCase__ :Tuple = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_json_dataset(__a , __a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def a ( __a , __a , __a ) -> Tuple: '''simple docstring''' UpperCamelCase__ :int = tmp_path / '''cache''' UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} UpperCamelCase__ :Any = features.copy() if features else default_expected_features UpperCamelCase__ :Union[str, Any] = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() assert isinstance(__a , __a ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def a ( __a , __a ) -> List[Any]: '''simple docstring''' UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} UpperCamelCase__ :int = features.copy() UpperCamelCase__ :List[Any] = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ :Optional[int] = tmp_path / '''cache''' UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() assert 
isinstance(__a , __a ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def a ( __a , __a , __a ) -> List[Any]: '''simple docstring''' UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache''' UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_json_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def a ( __a , __a , __a ) -> Any: '''simple docstring''' if issubclass(__a , __a ): UpperCamelCase__ :Union[str, Any] = jsonl_path elif issubclass(__a , __a ): UpperCamelCase__ :int = [jsonl_path] UpperCamelCase__ :Dict = tmp_path / '''cache''' UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read() _check_json_dataset(__a , __a ) def a ( __a , __a , __a=("train",) ) -> Optional[Any]: '''simple docstring''' assert isinstance(__a , __a ) for split in splits: UpperCamelCase__ :Optional[int] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def a ( __a , __a , __a ) -> List[str]: '''simple docstring''' UpperCamelCase__ :List[str] = tmp_path / '''cache''' UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_json_datasetdict(__a , __a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def a ( __a , __a , __a ) -> int: '''simple docstring''' UpperCamelCase__ :Tuple = tmp_path / '''cache''' UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features UpperCamelCase__ :str = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read() _check_json_datasetdict(__a , __a ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def a ( __a , __a , __a ) -> str: '''simple docstring''' if split: UpperCamelCase__ :List[str] = {split: jsonl_path} else: UpperCamelCase__ :int = '''train''' UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path} 
UpperCamelCase__ :Any = tmp_path / '''cache''' UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read() _check_json_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def a ( __a ) -> Union[str, Any]: '''simple docstring''' return json.load(__a ) def a ( __a ) -> int: '''simple docstring''' return [json.loads(__a ) for line in buffer] class lowercase : """simple docstring""" @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write() buffer.seek(0 ) UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert isinstance(exported_content[0] , UpperCamelCase_ ) assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write() buffer.seek(0 ) UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write() buffer.seek(0 ) UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert isinstance(exported_content[0] , UpperCamelCase_ ) assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' with io.BytesIO() as buffer: 
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write() buffer.seek(0 ) UpperCamelCase__ :int = load_json(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase_ ) == 10 def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' with pytest.raises(UpperCamelCase_ ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}''' UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write() with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f: UpperCamelCase__ :Dict = f.read() with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f: UpperCamelCase__ :int = f.read() assert exported_content == original_content
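These tests drive `JsonDatasetReader` directly; everyday code reaches the same machinery through the public `load_dataset("json", ...)` entry point. A rough sketch of that route, writing a throwaway JSON-Lines file first (the file name and rows are invented for illustration):

import json
import os
import tempfile

from datasets import load_dataset

rows = [{"col_1": str(i), "col_2": i, "col_3": float(i)} for i in range(4)]
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "data.jsonl")
    with open(path, "w") as f:
        f.write("\n".join(json.dumps(row) for row in rows))
    ds = load_dataset("json", data_files=path, split="train")
    assert ds.num_rows == 4
    assert set(ds.column_names) == {"col_1", "col_2", "col_3"}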
97
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase : Union[str, Any] = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[str] = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
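`_LazyModule` defers the heavy framework imports until an attribute is first touched. Outside transformers, roughly the same effect can be had with a module-level `__getattr__` (PEP 562); a sketch with a placeholder submodule and class name:

import importlib

# Map each (placeholder) submodule to the names it provides.
_import_structure = {"heavy_submodule": ["HeavyClass"]}


def __getattr__(name: str):
    # Import the owning submodule only when one of its names is first accessed.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __package__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")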
315
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a word's sorted-letter signature; anagrams share a signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the list that shares my_word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
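Sorting a word's letters gives a canonical form: two words are anagrams exactly when their signatures match, which is what lets the dictionary above bucket the whole word list in a single pass. For instance:

def signature(word: str) -> str:
    return "".join(sorted(word))


assert signature("listen") == signature("silent") == "eilnst"
assert signature("cat") != signature("dog")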
315
1
"""simple docstring""" import csv import tweepy # Twitter API credentials _SCREAMING_SNAKE_CASE : Optional[Any] = '''''' _SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' _SCREAMING_SNAKE_CASE : Dict = '''''' _SCREAMING_SNAKE_CASE : List[Any] = '''''' def lowerCamelCase__ ( _lowerCamelCase : Any ) -> Union[str, Any]: # authorize twitter, initialize tweepy lowerCamelCase_ = tweepy.OAuthHandler(lowercase__ , lowercase__ ) auth.set_access_token(lowercase__ , lowercase__ ) lowerCamelCase_ = tweepy.API(lowercase__ ) # initialize a list to hold all the tweepy Tweets lowerCamelCase_ = [] # make initial request for most recent tweets (200 is the maximum allowed count) lowerCamelCase_ = api.user_timeline(screen_name=lowercase__ , count=200 ) # save most recent tweets alltweets.extend(lowercase__ ) # save the id of the oldest tweet less one lowerCamelCase_ = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase__ ) > 0: print(F'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates lowerCamelCase_ = api.user_timeline( screen_name=lowercase__ , count=200 , max_id=lowercase__ ) # save most recent tweets alltweets.extend(lowercase__ ) # update the id of the oldest tweet less one lowerCamelCase_ = alltweets[-1].id - 1 print(F'''...{len(lowercase__ )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv lowerCamelCase_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F'''new_{screen_name}_tweets.csv''' , 'w' ) as f: lowerCamelCase_ = csv.writer(lowercase__ ) writer.writerow(['id', 'created_at', 'text'] ) writer.writerows(lowercase__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
183
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
0
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __lowerCamelCase : Optional[int] = '''\ Text data. Second line of data.''' __lowerCamelCase : Union[str, Any] = '''file''' @pytest.fixture(scope="""session""" ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""") SCREAMING_SNAKE_CASE__ = bytes(__UpperCamelCase , """utf-8""" ) with zstd.open(__UpperCamelCase , """wb""" ) as f: f.write(__UpperCamelCase ) return path @pytest.fixture def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Dict: """simple docstring""" with open(os.path.join(tmpfs.local_root_dir , __UpperCamelCase ) , """w""" ) as f: f.write(__UpperCamelCase ) return FILE_PATH @pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path} SCREAMING_SNAKE_CASE__ = input_paths[compression_format] SCREAMING_SNAKE_CASE__ = tmp_path / """cache""" SCREAMING_SNAKE_CASE__ = DownloadConfig(cache_dir=__UpperCamelCase , extract_compressed_file=__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = cached_path(__UpperCamelCase , download_config=__UpperCamelCase ) with open(__UpperCamelCase ) as f: SCREAMING_SNAKE_CASE__ = f.read() with open(__UpperCamelCase ) as f: SCREAMING_SNAKE_CASE__ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("""default_extracted""" , [True, False] ) @pytest.mark.parametrize("""default_cache_dir""" , [True, False] ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = """custom_cache""" SCREAMING_SNAKE_CASE__ = """custom_extracted_dir""" SCREAMING_SNAKE_CASE__ = tmp_path / """custom_extracted_path""" if default_extracted: SCREAMING_SNAKE_CASE__ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""") else: monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , __UpperCamelCase ) monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) SCREAMING_SNAKE_CASE__ = xz_file SCREAMING_SNAKE_CASE__ = ( DownloadConfig(extract_compressed_file=__UpperCamelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__UpperCamelCase ) ) SCREAMING_SNAKE_CASE__ = cached_path(__UpperCamelCase , download_config=__UpperCamelCase ) assert Path(__UpperCamelCase ).parent.parts[-2:] == expected def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = str(Path(__UpperCamelCase ).resolve() ) assert cached_path(__UpperCamelCase ) == text_file # relative path SCREAMING_SNAKE_CASE__ 
= str(Path(__UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(__UpperCamelCase ) == text_file def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = str(tmp_path.resolve() / """__missing_file__.txt""" ) with pytest.raises(__UpperCamelCase ): cached_path(__UpperCamelCase ) # relative path SCREAMING_SNAKE_CASE__ = """./__missing_file__.txt""" with pytest.raises(__UpperCamelCase ): cached_path(__UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(__UpperCamelCase ) as f: SCREAMING_SNAKE_CASE__ = f.read() assert output_file_content == FILE_CONTENT @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" with pytest.raises(__UpperCamelCase ): cached_path("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__UpperCamelCase ): http_get("""https://huggingface.co""" , temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): http_head("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__UpperCamelCase ): ftp_get("""ftp://huggingface.co""" , temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): ftp_head("""ftp://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__UpperCamelCase ): fsspec_get("""s3://huggingface.co""" , temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): fsspec_head("""s3://huggingface.co""" )
204
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
204
1
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel UpperCamelCase__ : Union[str, Any] = HfApi() UpperCamelCase__ : Tuple = {} # fmt: off UpperCamelCase__ : Any = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) UpperCamelCase__ : Any = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) UpperCamelCase__ : int = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) UpperCamelCase__ : int = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) UpperCamelCase__ : int = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) UpperCamelCase__ : List[Any] = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) UpperCamelCase__ : List[str] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) UpperCamelCase__ : Any = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) UpperCamelCase__ : Dict = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, 
-2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) UpperCamelCase__ : Tuple = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) UpperCamelCase__ : Optional[Any] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) UpperCamelCase__ : int = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) UpperCamelCase__ : Any = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) UpperCamelCase__ : Any = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) UpperCamelCase__ : Optional[Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on UpperCamelCase__ : Optional[Any] = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": UpperCamelCase__ : Union[str, Any] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(f"Started running {mod.modelId}!!!") if mod.modelId.startswith('''CompVis'''): UpperCamelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: UpperCamelCase__ : List[Any] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) UpperCamelCase__ : List[str] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) UpperCamelCase__ : List[str] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): UpperCamelCase__ : Optional[int] = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(f"{mod.modelId} has passed successfully!!!")
112
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb number_of_steps stairs, taking 1 or 2 steps at a time."""
    assert isinstance(number_of_steps, int) and number_of_steps > 0, (
        f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    )
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
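The loop is the Fibonacci recurrence ways(n) = ways(n - 1) + ways(n - 2): the final move onto step n was either a single step from n - 1 or a double step from n - 2. Checking the small cases (1, 2, 3, 5, 8) with an inline copy of the recurrence:

def ways(n: int) -> int:
    previous, current = 1, 1  # ways(0), ways(1)
    for _ in range(n - 1):
        current, previous = current + previous, current
    return current


assert [ways(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]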
112
1
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=9_9 , _a=3_2 , _a=5 , _a=4 , _a=6_4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=2 , _a=2 , _a=2 , _a=2 , _a=4 , _a=1 , ) -> str: _a : Optional[Any] = parent _a : Tuple = batch_size _a : Any = seq_length _a : Optional[Any] = is_training _a : List[Any] = use_input_mask _a : str = use_token_type_ids _a : Dict = use_labels _a : Dict = vocab_size _a : Any = hidden_size _a : List[Any] = num_hidden_layers _a : Any = num_attention_heads _a : Tuple = intermediate_size _a : Dict = hidden_act _a : str = hidden_dropout_prob _a : str = attention_probs_dropout_prob _a : Optional[int] = max_position_embeddings _a : Any = type_vocab_size _a : Union[str, Any] = type_sequence_label_size _a : str = initializer_range _a : Dict = num_labels _a : Optional[Any] = num_choices _a : Optional[int] = scope _a : int = q_groups _a : List[str] = k_groups _a : str = v_groups _a : str = post_attention_groups _a : Dict = intermediate_groups _a : Tuple = output_groups def __lowercase ( self ) -> Union[str, Any]: _a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Optional[int] = None if self.use_input_mask: _a : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) _a : Any = None _a : Optional[int] = None _a : Any = None if self.use_labels: _a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a : str = ids_tensor([self.batch_size] , self.num_choices ) _a : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase ( self ) -> str: return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> str: _a : Tuple = SqueezeBertModel(config=_a ) model.to(_a ) model.eval() _a : Union[str, Any] = model(_a , _a ) _a : Union[str, Any] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def 
__lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Any: _a : int = SqueezeBertForMaskedLM(config=_a ) model.to(_a ) model.eval() _a : List[Any] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Tuple: _a : List[str] = SqueezeBertForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _a : Union[str, Any] = model( _a , attention_mask=_a , start_positions=_a , end_positions=_a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _a : Dict = self.num_labels _a : str = SqueezeBertForSequenceClassification(_a ) model.to(_a ) model.eval() _a : Union[str, Any] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[Any]: _a : int = self.num_labels _a : Any = SqueezeBertForTokenClassification(config=_a ) model.to(_a ) model.eval() _a : Optional[int] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _a : Dict = self.num_choices _a : List[Any] = SqueezeBertForMultipleChoice(config=_a ) model.to(_a ) model.eval() _a : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : Optional[Any] = model( _a , attention_mask=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowercase ( self ) -> str: _a : Optional[Any] = self.prepare_config_and_inputs() (_a) : Tuple = config_and_inputs _a : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Tuple = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) UpperCAmelCase__ : Union[str, Any] = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Any = True UpperCAmelCase__ : Optional[Any] = False def __lowercase ( self ) -> Optional[Any]: _a : Union[str, Any] = SqueezeBertModelTester(self ) _a : List[Any] = ConfigTester(self , config_class=_a , dim=3_7 ) def __lowercase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def __lowercase ( self ) -> Tuple: _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_a ) def __lowercase ( self ) -> Optional[Any]: _a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_a ) def 
__lowercase ( self ) -> Union[str, Any]: _a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_a ) def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_a ) def __lowercase ( self ) -> str: _a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_a ) def __lowercase ( self ) -> Optional[Any]: _a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_a ) @slow def __lowercase ( self ) -> List[str]: for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : Optional[int] = SqueezeBertModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_sentencepiece @require_tokenizers @require_torch class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def __lowercase ( self ) -> List[str]: _a : int = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' ) _a : Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] ) _a : List[str] = model(_a )[0] _a : Any = torch.Size((1, 3) ) self.assertEqual(output.shape , _a ) _a : Optional[int] = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(_a , _a , atol=1e-4 ) )
350
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"] UpperCAmelCase__ : str = "ViltImageProcessor" UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast") def __init__( self , _a=None , _a=None , **_a ) -> Any: _a : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _a , ) _a : Dict = kwargs.pop('''feature_extractor''' ) _a : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_a , _a ) _a : int = self.image_processor def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding: _a : Tuple = self.tokenizer( text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel_values + pixel_mask _a : str = self.image_processor(_a , return_tensors=_a ) encoding.update(_a ) return encoding def __lowercase ( self , *_a , **_a ) -> Optional[Any]: return self.tokenizer.batch_decode(*_a , **_a ) def __lowercase ( self , *_a , **_a ) -> str: return self.tokenizer.decode(*_a , **_a ) @property def __lowercase ( self ) -> Optional[int]: _a : str = self.tokenizer.model_input_names _a : Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __lowercase ( self ) -> Optional[Any]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , ) return self.image_processor_class @property def __lowercase ( self ) -> Any: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , ) return self.image_processor
15
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy __a = logging.get_logger(__name__) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' def __init__( self: List[Any] , snake_case: int , snake_case: int , snake_case: float , **snake_case: Optional[int] ) -> Optional[Any]: snake_case_ :List[Any] = feature_size snake_case_ :Tuple = sampling_rate snake_case_ :Optional[int] = padding_value snake_case_ :Dict = kwargs.pop("""padding_side""" , """right""" ) snake_case_ :List[Any] = kwargs.pop("""return_attention_mask""" , snake_case ) super().__init__(**snake_case ) def lowerCAmelCase_ ( self: List[str] , snake_case: Union[ BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]], ] , snake_case: Union[bool, str, PaddingStrategy] = True , snake_case: Optional[int] = None , snake_case: bool = False , snake_case: Optional[int] = None , snake_case: Optional[bool] = None , snake_case: Optional[Union[str, TensorType]] = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): snake_case_ :int = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( """You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`""" f""" to this method that includes {self.model_input_names[0]}, but you provided""" f""" {list(processed_features.keys() )}""" ) snake_case_ :Union[str, Any] = processed_features[self.model_input_names[0]] snake_case_ :Optional[int] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(snake_case ) == 0: if return_attention_mask: snake_case_ :Optional[int] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch snake_case_ :Optional[Any] = required_input[0] if isinstance(snake_case , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. snake_case_ :Tuple = 0 while len(required_input[index] ) == 0: index += 1 if index < len(snake_case ): snake_case_ :Tuple = required_input[index][0] if return_tensors is None: if is_tf_tensor(snake_case ): snake_case_ :int = """tf""" elif is_torch_tensor(snake_case ): snake_case_ :Union[str, Any] = """pt""" elif isinstance(snake_case , (int, float, list, tuple, np.ndarray) ): snake_case_ :Optional[int] = """np""" else: raise ValueError( f"""type of {first_element} unknown: {type(snake_case )}. 
""" """Should be one of a python, numpy, pytorch or tensorflow object.""" ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): snake_case_ :Optional[int] = to_numpy(snake_case ) else: snake_case_ :int = [to_numpy(snake_case ) for v in value] # Convert padding_strategy in PaddingStrategy snake_case_ :Union[str, Any] = self._get_padding_strategies(padding=snake_case , max_length=snake_case ) snake_case_ :List[Any] = processed_features[self.model_input_names[0]] snake_case_ :Optional[Any] = len(snake_case ) if not all(len(snake_case ) == batch_size for v in processed_features.values() ): raise ValueError("""Some items in the output dictionary have a different batch size than others.""" ) snake_case_ :Optional[Any] = [] for i in range(snake_case ): snake_case_ :List[Any] = {k: v[i] for k, v in processed_features.items()} # truncation snake_case_ :Union[str, Any] = self._truncate( snake_case , max_length=snake_case , pad_to_multiple_of=snake_case , truncation=snake_case , ) truncated_inputs.append(snake_case ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length snake_case_ :Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) snake_case_ :int = PaddingStrategy.MAX_LENGTH snake_case_ :List[Any] = {} for i in range(snake_case ): # padding snake_case_ :Any = self._pad( truncated_inputs[i] , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , ) for key, value in outputs.items(): if key not in batch_outputs: snake_case_ :Optional[int] = [] if value.dtype is np.dtype(np.floataa ): snake_case_ :Tuple = value.astype(np.floataa ) batch_outputs[key].append(snake_case ) return BatchFeature(snake_case , tensor_type=snake_case ) def lowerCAmelCase_ ( self: Dict , snake_case: Union[Dict[str, np.ndarray], BatchFeature] , snake_case: Optional[int] = None , snake_case: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case: Optional[int] = None , snake_case: Optional[bool] = None , ) -> dict: snake_case_ :Any = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: snake_case_ :Any = len(snake_case ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case_ :Optional[int] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of snake_case_ :Tuple = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(snake_case ) < max_length if return_attention_mask and "attention_mask" not in processed_features: snake_case_ :Union[str, Any] = np.ones(len(snake_case ) , dtype=np.intaa ) if needs_to_be_padded: snake_case_ :Optional[int] = max_length - len(snake_case ) if self.padding_side == "right": if return_attention_mask: snake_case_ :Union[str, Any] = np.pad( processed_features["""attention_mask"""] , (0, difference) ) snake_case_ :Tuple = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) snake_case_ :int = np.pad( snake_case , snake_case , """constant""" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: snake_case_ :Dict = np.pad( processed_features["""attention_mask"""] , (difference, 0) ) snake_case_ :str = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) snake_case_ :Optional[Any] = np.pad( snake_case , snake_case , """constant""" , constant_values=self.padding_value ) else: raise 
ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return processed_features def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Union[Dict[str, np.ndarray], BatchFeature] , snake_case: Optional[int] = None , snake_case: Optional[int] = None , snake_case: Optional[bool] = None , ) -> List[Any]: if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" ) snake_case_ :Any = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case_ :str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of snake_case_ :Optional[Any] = len(snake_case ) > max_length if needs_to_be_truncated: snake_case_ :Union[str, Any] = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: snake_case_ :Union[str, Any] = processed_features["""attention_mask"""][:max_length] return processed_features def lowerCAmelCase_ ( self: List[Any] , snake_case: Tuple=False , snake_case: Union[str, Any]=None ) -> Union[str, Any]: # Get padding strategy if padding is not False: if padding is True: snake_case_ :Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(snake_case , snake_case ): snake_case_ :Dict = PaddingStrategy(snake_case ) elif isinstance(snake_case , snake_case ): snake_case_ :Optional[Any] = padding else: snake_case_ :Optional[Any] = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( """Asking to pad but the feature_extractor does not have a padding value. Please select a value to use""" """ as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" ) return padding_strategy
66
def heaps(arr: list) -> list:
    """Generate all permutations of arr with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first and last elements
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
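Heap's algorithm reaches each of the n! orderings via a single swap per step. Assuming the `heaps` function above, a quick cross-check against `itertools.permutations`:

from itertools import permutations

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))
assert len(heaps(list(range(4)))) == 24  # 4! orderings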
233
0
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> float: """simple docstring""" return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__UpperCamelCase , __UpperCamelCase ) ) ) def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> list[list[list[float] | float]]: """simple docstring""" if dataset.ndim != value_array.ndim: lowerCAmelCase_ : str = ( "Wrong input data's dimensions... " f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(__UpperCamelCase ) try: if dataset.shape[1] != value_array.shape[1]: lowerCAmelCase_ : str = ( "Wrong input data's shape... " f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(__UpperCamelCase ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("Wrong shape" ) if dataset.dtype != value_array.dtype: lowerCAmelCase_ : Tuple = ( "Input data have different datatype... " f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(__UpperCamelCase ) lowerCAmelCase_ : List[str] = [] for value in value_array: lowerCAmelCase_ : Union[str, Any] = euclidean(__UpperCamelCase , dataset[0] ) lowerCAmelCase_ : Optional[Any] = dataset[0].tolist() for dataset_value in dataset[1:]: lowerCAmelCase_ : Tuple = euclidean(__UpperCamelCase , __UpperCamelCase ) if dist > temp_dist: lowerCAmelCase_ : Any = temp_dist lowerCAmelCase_ : List[Any] = dataset_value.tolist() answer.append([vector, dist] ) return answer def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> float: """simple docstring""" return np.dot(__UpperCamelCase , __UpperCamelCase ) / (norm(__UpperCamelCase ) * norm(__UpperCamelCase )) if __name__ == "__main__": import doctest doctest.testmod()
161
"""simple docstring""" from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" if not is_accelerate_available(): return method lowerCAmelCase_ : Union[str, Any] = version.parse(accelerate.__version__ ).base_version if version.parse(__UpperCamelCase ) < version.parse("0.17.0" ): return method def wrapper(self , *__UpperCamelCase , **__UpperCamelCase ): if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ): self._hf_hook.pre_forward(self ) return method(self , *__UpperCamelCase , **__UpperCamelCase ) return wrapper
161
1
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
84
'''simple docstring'''

from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
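# A worked check of the driver above: height = log2(8) = 3, the level-2 maxima
# are (90, 33, 65, 34423), the level-1 minima are (33, 65), and the root
# maximum is therefore 65.
import math

assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], math.log(8, 2)) == 65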
56
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _A : Tuple = logging.get_logger(__name__) _A : List[Any] = { 'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,lowerCAmelCase_ ): _UpperCAmelCase : Optional[int] = "focalnet" def __init__( self : int , A : str=2_2_4 , A : Optional[Any]=4 , A : Union[str, Any]=3 , A : Dict=9_6 , A : Optional[Any]=False , A : int=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , A : Optional[Any]=[2, 2, 6, 2] , A : List[Any]=[2, 2, 2, 2] , A : Optional[int]=[3, 3, 3, 3] , A : str="gelu" , A : Union[str, Any]=4.0 , A : Optional[Any]=0.0 , A : int=0.1 , A : Any=False , A : str=1e-4 , A : int=False , A : Optional[int]=False , A : List[Any]=False , A : int=0.02 , A : Union[str, Any]=1e-5 , A : str=3_2 , A : Optional[Any]=None , A : Tuple=None , **A : Dict , ) ->Dict: super().__init__(**A ) lowerCamelCase__ : Tuple = image_size lowerCamelCase__ : Optional[Any] = patch_size lowerCamelCase__ : Dict = num_channels lowerCamelCase__ : List[Any] = embed_dim lowerCamelCase__ : int = use_conv_embed lowerCamelCase__ : Optional[Any] = hidden_sizes lowerCamelCase__ : Dict = depths lowerCamelCase__ : str = focal_levels lowerCamelCase__ : Dict = focal_windows lowerCamelCase__ : Optional[int] = hidden_act lowerCamelCase__ : int = mlp_ratio lowerCamelCase__ : Optional[Any] = hidden_dropout_prob lowerCamelCase__ : Optional[Any] = drop_path_rate lowerCamelCase__ : Tuple = use_layerscale lowerCamelCase__ : Optional[Any] = layerscale_value lowerCamelCase__ : Any = use_post_layernorm lowerCamelCase__ : Tuple = use_post_layernorm_in_modulation lowerCamelCase__ : Optional[Any] = normalize_modulator lowerCamelCase__ : int = initializer_range lowerCamelCase__ : Any = layer_norm_eps lowerCamelCase__ : List[Any] = encoder_stride lowerCamelCase__ : Tuple = ['''stem'''] + [F"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = get_aligned_output_features_output_indices( out_features=A , out_indices=A , stage_names=self.stage_names )
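# The stage bookkeeping at the end of the config above, in isolation: a "stem"
# entry plus one named stage per depth. The depths mirror the defaults above;
# nothing here touches the real FocalNet configuration class.
depths = [2, 2, 6, 2]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]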
265
import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml _A : Any = logging.get_logger(__name__) def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: """simple docstring""" def run_func(UpperCAmelCase ): @wraps(UpperCAmelCase ) def run_in_eager_mode(*UpperCAmelCase , **UpperCAmelCase ): return func(*UpperCAmelCase , **UpperCAmelCase ) @wraps(UpperCAmelCase ) @tf.function(experimental_compile=UpperCAmelCase ) def run_in_graph_mode(*UpperCAmelCase , **UpperCAmelCase ): return func(*UpperCAmelCase , **UpperCAmelCase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> ["tf.Tensor"]: """simple docstring""" lowerCamelCase__ : List[Any] = random.Random() lowerCamelCase__ : str = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(UpperCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): _UpperCAmelCase : TensorFlowBenchmarkArguments _UpperCAmelCase : PretrainedConfig _UpperCAmelCase : str = "TensorFlow" @property def __lowerCamelCase ( self : int ) ->Optional[int]: return tf.__version__ def __lowerCamelCase ( self : Optional[int] , A : str , A : int , A : int ) ->float: # initialize GPU on separate process lowerCamelCase__ : Dict = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ : int = self._prepare_inference_func(A , A , A ) return self._measure_speed(_inference ) def __lowerCamelCase ( self : str , A : str , A : int , A : int ) ->float: lowerCamelCase__ : Optional[int] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ : List[Any] = self._prepare_train_func(A , A , A ) return self._measure_speed(_train ) def __lowerCamelCase ( self : int , A : str , A : int , A : int ) ->[Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A ) lowerCamelCase__ : int = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ : str = self._prepare_inference_func(A , A , A ) return self._measure_memory(_inference ) def __lowerCamelCase ( self : List[str] , A : str , A : int , A : int ) ->[Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A ) lowerCamelCase__ : List[Any] = self.args.strategy if strategy is None: raise ValueError('''A device 
strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ : str = self._prepare_train_func(A , A , A ) return self._measure_memory(_train ) def __lowerCamelCase ( self : Dict , A : str , A : int , A : int ) ->Callable[[], None]: lowerCamelCase__ : Tuple = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) lowerCamelCase__ : Tuple = ( hasattr(A , '''architectures''' ) and isinstance(config.architectures , A ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase__ : Any = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase__ : List[Any] = __import__('''transformers''' , fromlist=[model_class] ) lowerCamelCase__ : int = getattr(A , A ) lowerCamelCase__ : int = model_cls(A ) except ImportError: raise ImportError( F"{model_class} does not exist. If you just want to test the pretrained model, you might want to" ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: lowerCamelCase__ : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](A ) # encoder-decoder has vocab size saved differently lowerCamelCase__ : Tuple = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size lowerCamelCase__ : Optional[Any] = random_input_ids(A , A , A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(A , decoder_input_ids=A , training=A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(A , training=A ) lowerCamelCase__ : int = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def __lowerCamelCase ( self : List[str] , A : str , A : int , A : int ) ->Callable[[], None]: lowerCamelCase__ : Tuple = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' ) if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) lowerCamelCase__ : Optional[int] = ( hasattr(A , '''architectures''' ) and isinstance(config.architectures , A ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase__ : Any = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase__ : List[str] = __import__('''transformers''' , fromlist=[model_class] ) lowerCamelCase__ : Optional[int] = getattr(A , A ) lowerCamelCase__ : Optional[Any] = model_cls(A ) except ImportError: raise ImportError( F"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: lowerCamelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A ) # encoder-decoder has vocab size saved differently lowerCamelCase__ : Optional[int] = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size lowerCamelCase__ : Dict = random_input_ids(A , A , A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): lowerCamelCase__ : int = model(A , decoder_input_ids=A , labels=A , training=A )[0] lowerCamelCase__ : List[Any] = tf.gradients(A , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): lowerCamelCase__ : Optional[int] = model(A , labels=A , training=A )[0] lowerCamelCase__ : List[str] = tf.gradients(A , model.trainable_variables ) return gradients lowerCamelCase__ : Tuple = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def __lowerCamelCase ( self : Tuple , A : Any ) ->float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' ) timeit.repeat(A , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowerCamelCase__ : Optional[Any] = timeit.repeat( A , repeat=self.args.repeat , number=1_0 , ) return min(A ) / 10.0 except ResourceExhaustedError as e: self.print_fn(F"Doesn't fit on GPU. {e}" ) def __lowerCamelCase ( self : List[Any] , A : Callable[[], None] ) ->[Memory, MemorySummary]: logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''' ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''' ) lowerCamelCase__ : Union[str, Any] = start_memory_tracing('''transformers''' ) if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''' ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''' ) lowerCamelCase__ : Union[str, Any] = '''N/A''' else: logger.info( '''Measuring total GPU usage on GPU device. 
Make sure to not have additional processes''' ''' running on the same GPU.''' ) # init nvml nvml.nvmlInit() func() lowerCamelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowerCamelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(A ) lowerCamelCase__ : List[Any] = meminfo.used lowerCamelCase__ : Union[str, Any] = Memory(A ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''' ) lowerCamelCase__ : Tuple = None else: lowerCamelCase__ : Dict = measure_peak_memory_cpu(A ) lowerCamelCase__ : Optional[Any] = Memory(A ) if isinstance(A , A ) else memory_bytes if self.args.trace_memory_line_by_line: lowerCamelCase__ : Union[str, Any] = stop_memory_tracing(A ) if memory is None: lowerCamelCase__ : Dict = summary.total else: lowerCamelCase__ : Optional[int] = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F"Doesn't fit on GPU. {e}" ) return "N/A", None
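# The timing strategy used by the speed measurement above, reduced to its
# core: run the callable 10 times per repeat and keep the best repeat, since
# the timeit documentation recommends taking the minimum rather than the
# average. `work` is a dummy workload standing in for a model forward pass.
import timeit

def work():
    sum(range(1000))

runtimes = timeit.repeat(work, repeat=3, number=10)
best_time_per_call = min(runtimes) / 10.0
assert best_time_per_call > 0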
265
1
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class __snake_case : def __init__( self , __UpperCamelCase , ) -> str: '''simple docstring''' snake_case__ : Optional[int] = parent snake_case__ : Union[str, Any] = 13 snake_case__ : int = 7 snake_case__ : str = True snake_case__ : Dict = True snake_case__ : Tuple = False snake_case__ : Union[str, Any] = True snake_case__ : Dict = 99 snake_case__ : Tuple = 32 snake_case__ : Optional[int] = 2 snake_case__ : Dict = 4 snake_case__ : Dict = 37 snake_case__ : Any = 'gelu' snake_case__ : Any = 0.1 snake_case__ : Any = 0.1 snake_case__ : List[Any] = 512 snake_case__ : Optional[Any] = 16 snake_case__ : Optional[int] = 2 snake_case__ : List[Any] = 0.0_2 snake_case__ : Tuple = 3 snake_case__ : Dict = 4 snake_case__ : Tuple = None def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Optional[Any] = None if self.use_input_mask: snake_case__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Any = None snake_case__ : Union[str, Any] = None snake_case__ : Tuple = None if self.use_labels: snake_case__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ : List[str] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = TFDistilBertModel(config=__UpperCamelCase ) snake_case__ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask} snake_case__ : List[str] = model(__UpperCamelCase ) snake_case__ : Union[str, Any] = [input_ids, input_mask] snake_case__ : Optional[int] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: '''simple docstring''' snake_case__ : str = TFDistilBertForMaskedLM(config=__UpperCamelCase ) snake_case__ : int = {'input_ids': input_ids, 'attention_mask': input_mask} 
snake_case__ : str = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: '''simple docstring''' snake_case__ : Dict = TFDistilBertForQuestionAnswering(config=__UpperCamelCase ) snake_case__ : Tuple = { 'input_ids': input_ids, 'attention_mask': input_mask, } snake_case__ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : List[str] = self.num_labels snake_case__ : Tuple = TFDistilBertForSequenceClassification(__UpperCamelCase ) snake_case__ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask} snake_case__ : str = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: '''simple docstring''' snake_case__ : Optional[int] = self.num_choices snake_case__ : str = TFDistilBertForMultipleChoice(__UpperCamelCase ) snake_case__ : Dict = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case__ : Tuple = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case__ : Dict = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, } snake_case__ : Any = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[int] = self.num_labels snake_case__ : Optional[int] = TFDistilBertForTokenClassification(__UpperCamelCase ) snake_case__ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} snake_case__ : str = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[Any] = self.prepare_config_and_inputs() ((snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__)) : Tuple = config_and_inputs snake_case__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __lowerCamelCase = ( { """feature-extraction""": TFDistilBertModel, """fill-mask""": TFDistilBertForMaskedLM, """question-answering""": TFDistilBertForQuestionAnswering, """text-classification""": TFDistilBertForSequenceClassification, """token-classification""": TFDistilBertForTokenClassification, """zero-shot""": TFDistilBertForSequenceClassification, } if is_tf_available() else {} 
) __lowerCamelCase = False __lowerCamelCase = False def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Dict = TFDistilBertModelTester(self ) snake_case__ : Tuple = ConfigTester(self , config_class=__UpperCamelCase , dim=37 ) def __a ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__UpperCamelCase ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCamelCase ) def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCamelCase ) def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCamelCase ) def __a ( self ) -> Union[str, Any]: '''simple docstring''' snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCamelCase ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCamelCase ) @slow def __a ( self ) -> Any: '''simple docstring''' for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): snake_case__ : Optional[Any] = TFDistilBertModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_tf class __snake_case ( unittest.TestCase ): @slow def __a ( self ) -> Any: '''simple docstring''' snake_case__ : Optional[int] = TFDistilBertModel.from_pretrained('distilbert-base-uncased' ) snake_case__ : Any = tf.constant([[0, 1, 2, 3, 4, 5]] ) snake_case__ : Dict = model(__UpperCamelCase )[0] snake_case__ : Optional[int] = [1, 6, 768] self.assertEqual(output.shape , __UpperCamelCase ) snake_case__ : List[Any] = tf.constant( [ [ [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9], [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4], [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 )
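# The test pattern repeated above, in miniature: build tiny inputs, run the
# head, and assert only on output shapes. A framework-free sketch with numpy
# standing in for the model call; names and sizes are illustrative.
import numpy as np

batch_size, seq_length, num_labels = 13, 7, 3

def dummy_token_classifier(input_ids):
    return np.zeros((input_ids.shape[0], input_ids.shape[1], num_labels))

logits = dummy_token_classifier(np.zeros((batch_size, seq_length), dtype=np.int32))
assert logits.shape == (batch_size, seq_length, num_labels)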
143
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Tuple = logging.get_logger(__name__) lowerCAmelCase__ : Union[str, Any] = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ : Dict = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ : List[Any] = { '''google/rembert''': 2_56, } lowerCAmelCase__ : List[str] = '''▁''' class __snake_case ( _lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = RemBertTokenizer def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="[CLS]" , __UpperCamelCase="[SEP]" , __UpperCamelCase="<unk>" , __UpperCamelCase="[SEP]" , __UpperCamelCase="<pad>" , __UpperCamelCase="[CLS]" , __UpperCamelCase="[MASK]" , **__UpperCamelCase , ) -> Tuple: '''simple docstring''' snake_case__ : Optional[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token super().__init__( __UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , **__UpperCamelCase , ) snake_case__ : int = do_lower_case snake_case__ : Any = remove_space snake_case__ : List[Any] = keep_accents snake_case__ : Dict = vocab_file snake_case__ : int = False if not self.vocab_file else True def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' snake_case__ : Dict = [self.sep_token_id] snake_case__ : List[str] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1] return [1] + ([0] * len(__UpperCamelCase )) + [1] def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' snake_case__ : List[Any] = [self.sep_token_id] snake_case__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCamelCase ): logger.error('Vocabulary path ({}) should be a directory'.format(__UpperCamelCase ) ) return snake_case__ : List[str] = os.path.join( __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ): copyfile(self.vocab_file , __UpperCamelCase ) return (out_vocab_file,)
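# The special-tokens mask built by the tokenizer above, spelled out: 1 marks
# the [CLS]/[SEP] positions and 0 marks ordinary tokens, for one or two
# sequences. `special_tokens_mask` is an illustrative stand-in for the method.
from typing import Optional

def special_tokens_mask(len_a: int, len_b: Optional[int] = None):
    if len_b is None:
        return [1] + [0] * len_a + [1]
    return [1] + [0] * len_a + [1] + [0] * len_b + [1]

assert special_tokens_mask(3) == [1, 0, 0, 0, 1]
assert special_tokens_mask(2, 2) == [1, 0, 0, 1, 0, 0, 1]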
143
1
"""simple docstring""" from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( a_, a_ ): '''simple docstring''' @register_to_config def __init__(self , a_ , a_ = None , a_ = None ): '''simple docstring''' super().__init__() __snake_case : Optional[Any] = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" __snake_case : Tuple = torch.zeros(lowercase_ , lowercase_ ) else: __snake_case : Any = None __snake_case : List[str] = torch.nn.Parameter(lowercase_ ) class _UpperCAmelCase ( a_ ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=lowercase_ , transformer=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : int = len(lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else 1 # get prompt text embeddings __snake_case : int = self.tokenizer( lowercase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) __snake_case : List[str] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __snake_case : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length] __snake_case : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 __snake_case : str = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowercase_ ) # duplicate text embeddings for each generation per prompt __snake_case : Tuple = prompt_embeds.repeat_interleave(lowercase_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: __snake_case : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings __snake_case : str = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase_ , 1 , 1 ) else: __snake_case : List[Any] = [''''''] * batch_size __snake_case : Any = text_input_ids.shape[-1] __snake_case : Optional[Any] = self.tokenizer( lowercase_ , padding='''max_length''' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='''pt''' , ) __snake_case : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings __snake_case : List[Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowercase_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case : Dict = negative_prompt_embeds.shape[1] __snake_case : List[Any] = negative_prompt_embeds.repeat(1 , lowercase_ , 1 ) __snake_case : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__(self , a_ , a_ = 1_00 , a_ = 5.0 , a_ = 1.0 , a_ = 1 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , ): '''simple docstring''' if isinstance(lowercase_ , lowercase_ ): __snake_case : int = 1 elif isinstance(lowercase_ , lowercase_ ): __snake_case : Optional[int] = len(lowercase_ ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}""" ) __snake_case : Any = batch_size * num_images_per_prompt __snake_case : List[Any] = guidance_scale > 1.0 __snake_case : Optional[int] = self._encode_prompt(lowercase_ , lowercase_ , lowercase_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(lowercase_ )}.""" ) # get the initial completely masked latents unless the user supplied it __snake_case : int = (batch_size, self.transformer.num_latent_pixels) if latents is None: __snake_case : List[str] = self.transformer.num_vector_embeds - 1 __snake_case : Any = torch.full(lowercase_ , lowercase_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,''' f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" ) __snake_case : Optional[Any] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(lowercase_ , device=self.device ) __snake_case : Dict = self.scheduler.timesteps.to(self.device ) __snake_case : Any = latents for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the sample if we are doing classifier free guidance __snake_case : Dict = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` __snake_case : Dict = self.transformer(lowercase_ , encoder_hidden_states=lowercase_ , timestep=lowercase_ ).sample if do_classifier_free_guidance: __snake_case : List[Any] = model_output.chunk(2 ) __snake_case : Tuple = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(lowercase_ , dim=1 , keepdim=lowercase_ ) __snake_case : Dict = self.truncate(lowercase_ , lowercase_ ) # remove `log(0)`'s (`-inf`s) __snake_case : Optional[Any] = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 __snake_case : List[str] = self.scheduler.step(lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ , lowercase_ ) __snake_case : List[str] = self.vqvae.config.vq_embed_dim __snake_case : Optional[Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) __snake_case : Optional[Any] = self.vqvae.quantize.get_codebook_entry(lowercase_ , shape=lowercase_ ) __snake_case : Dict = self.vqvae.decode(lowercase_ , force_not_quantize=lowercase_ ).sample __snake_case : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) __snake_case : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __snake_case : Optional[Any] = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Optional[Any] = torch.sort(lowercase_ , 1 , descending=lowercase_ ) __snake_case : Any = torch.exp(lowercase_ ) __snake_case : Union[str, Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out __snake_case : Optional[int] = torch.full_like(keep_mask[:, 0:1, :] , lowercase_ ) __snake_case : Tuple = torch.cat((all_true, keep_mask) , dim=1 ) __snake_case : List[str] = keep_mask[:, :-1, :] __snake_case : Optional[Any] = keep_mask.gather(1 , indices.argsort(1 ) ) __snake_case : Dict = log_p_x_0.clone() __snake_case : Tuple = -torch.inf # -inf = log(0) return rv
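# The truncation step at the end of the pipeline above, approximated in numpy:
# keep the most probable classes whose cumulative mass stays under the
# truncation rate (the single largest is always kept) and zero out the rest.
# This sketch works on plain probabilities rather than log-probs and applies a
# slightly stricter cumulative cut-off than the shifted mask used above.
import numpy as np

def truncate_probs(probs, truncation_rate):
    order = np.argsort(probs)[::-1]
    keep = np.cumsum(probs[order]) < truncation_rate
    keep[0] = True  # never zero out the largest probability
    mask = np.zeros_like(probs, dtype=bool)
    mask[order[keep]] = True
    return np.where(mask, probs, 0.0)

out = truncate_probs(np.array([0.5, 0.3, 0.2]), truncation_rate=0.9)
assert out.tolist() == [0.5, 0.3, 0.0]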
357
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done successfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
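# The recurring `if "kernel" in name: array = array.transpose()` step above
# exists because TF dense kernels are stored as (in_features, out_features)
# while PyTorch nn.Linear weights are (out_features, in_features). A
# shape-only numpy illustration:
import numpy as np

tf_kernel = np.zeros((768, 3072))   # (in_features, out_features) in TensorFlow
pt_weight = tf_kernel.transpose()   # (out_features, in_features) in PyTorch
assert pt_weight.shape == (3072, 768)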
24
0
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __a = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' __a = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' __a = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase__( datasets.Metric ): """simple docstring""" def _lowercase ( self : Optional[Any] ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[List[List[str]]] , SCREAMING_SNAKE_CASE_ : List[List[str]] , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE_ , hypotheses=SCREAMING_SNAKE_CASE_ , min_len=SCREAMING_SNAKE_CASE_ , max_len=SCREAMING_SNAKE_CASE_ ) }
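# The metric above ultimately defers to nltk's corpus-level GLEU; calling it
# directly looks like the sketch below (the exact score depends on the
# installed nltk version, so only the value range is asserted).
from nltk.translate.gleu_score import corpus_gleu

hypotheses = [["the", "cat", "sat"]]
list_of_references = [[["the", "cat", "sat", "down"]]]
score = corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4)
assert 0.0 <= score <= 1.0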
30
"""simple docstring""" from typing import List import numpy as np def __lowerCAmelCase ( lowercase : dict ) -> int: """simple docstring""" snake_case : Union[str, Any] = {key: len(lowercase ) for key, value in gen_kwargs.items() if isinstance(lowercase , lowercase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) snake_case : int = max(lists_lengths.values() , default=0 ) return max(1 , lowercase ) def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> List[range]: """simple docstring""" snake_case : Union[str, Any] = [] for group_idx in range(lowercase ): snake_case : Union[str, Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break snake_case : int = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 snake_case : Dict = range(lowercase , start + num_shards_to_add ) shards_indices_per_group.append(lowercase ) return shards_indices_per_group def __lowerCAmelCase ( lowercase : dict , lowercase : int ) -> List[dict]: """simple docstring""" snake_case : int = _number_of_shards_in_gen_kwargs(lowercase ) if num_shards == 1: return [dict(lowercase )] else: snake_case : Optional[int] = _distribute_shards(num_shards=lowercase , max_num_jobs=lowercase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(lowercase , lowercase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(lowercase ) ) ] def __lowerCAmelCase ( lowercase : List[dict] ) -> dict: """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , lowercase ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __lowerCAmelCase ( lowercase : np.random.Generator , lowercase : dict ) -> dict: """simple docstring""" snake_case : Tuple = {len(lowercase ) for value in gen_kwargs.values() if isinstance(lowercase , lowercase )} snake_case : str = {} for size in list_sizes: snake_case : Optional[int] = list(range(lowercase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes snake_case : Dict = dict(lowercase ) for key, value in shuffled_kwargs.items(): if isinstance(lowercase , lowercase ): snake_case : Dict = [value[i] for i in indices_per_size[len(lowercase )]] return shuffled_kwargs
203
0
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
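# For reference, a compact round trip of the offload API exercised by the
# tests above; the tensor name and shape are illustrative.
import torch
from tempfile import TemporaryDirectory

from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {"w": torch.randn(2, 2)}
with TemporaryDirectory() as folder:
    offload_state_dict(folder, state_dict)  # writes w.dat plus an index.json
    loader = OffloadedWeightsLoader(save_folder=folder)
    assert torch.allclose(loader["w"], state_dict["w"])  # loaded back from disk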
4
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Threshold the logits into a binary mask.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
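# A minimal invocation sketch for the tool above. It assumes the CLIPSeg
# checkpoint can be downloaded and that "photo.png" exists; both are
# illustrative, and the PipelineTool base class chains encode/forward/decode.
from PIL import Image

tool = ImageSegmentationTool()
mask = tool(image=Image.open("photo.png"), label="cat")  # returns a binary PIL mask
mask.save("cat_mask.png")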
4
1
import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): lowerCamelCase : List[str] = True from torch.cuda.amp import autocast lowerCamelCase : Tuple = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE ( lowercase : int=None , lowercase : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=lowercase ) @dataclass class A: '''simple docstring''' UpperCamelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=UpperCamelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) UpperCamelCase = field( default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} ) UpperCamelCase = field( default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} ) UpperCamelCase = field( default=0.1 , metadata={ '''help''': '''The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.''' } , ) UpperCamelCase = field( default=0.1 , metadata={'''help''': '''The dropout probabilitiy for all 1D convolutional layers in feature extractor.'''} , ) UpperCamelCase = field( default=0.05 , metadata={ '''help''': ( '''Propability of each feature vector along the time axis to be chosen as the start of the vector''' '''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature''' '''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.''' ) } , ) UpperCamelCase = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} ) @dataclass class A: '''simple docstring''' UpperCamelCase = field( default=UpperCamelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''train+validation''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). 
Defaults to \'train\'''' } , ) UpperCamelCase = field( default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of validation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = list_field( default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , ) @dataclass class A: '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = True UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None def __call__( self : Optional[Any] , A_ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]: """simple docstring""" lowerCamelCase_ = [{'input_values': feature['input_values']} for feature in features] lowerCamelCase_ = [{'input_ids': feature['labels']} for feature in features] lowerCamelCase_ = self.processor.pad( A_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) lowerCamelCase_ = self.processor.pad( labels=A_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , ) # replace padding with -100 to ignore loss correctly lowerCamelCase_ = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 ) lowerCamelCase_ = labels return batch class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : int , A_ : nn.Module , A_ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor: """simple docstring""" model.train() lowerCamelCase_ = self._prepare_inputs(A_ ) if self.use_amp: with autocast(): lowerCamelCase_ = self.compute_loss(A_ , A_ ) else: lowerCamelCase_ = self.compute_loss(A_ , A_ ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": lowerCamelCase_ = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": lowerCamelCase_ = loss.sum() / (inputs['labels'] >= 0).sum() else: raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" ) if self.args.gradient_accumulation_steps > 1: lowerCamelCase_ = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(A_ ).backward() elif self.use_apex: with amp.scale_loss(A_ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(A_ ) else: loss.backward() return loss.detach() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses() # Detecting last checkpoint. lowerCamelCase_ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCamelCase_ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , lowercase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: lowerCamelCase_ = datasets.load_dataset( 'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name ) lowerCamelCase_ = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' ) # Create and save tokenizer lowerCamelCase_ = f"""[{"".join(data_args.chars_to_ignore )}]""" def remove_special_characters(lowercase : List[Any] ): lowerCamelCase_ = re.sub(lowercase , '' , batch['sentence'] ).lower() + ' ' return batch lowerCamelCase_ = train_dataset.map(lowercase , remove_columns=['sentence'] ) lowerCamelCase_ = eval_dataset.map(lowercase , remove_columns=['sentence'] ) def extract_all_chars(lowercase : Tuple ): lowerCamelCase_ = ' '.join(batch['text'] ) lowerCamelCase_ = list(set(lowercase ) ) return {"vocab": [vocab], "all_text": [all_text]} lowerCamelCase_ = train_dataset.map( lowercase , batched=lowercase , batch_size=-1 , keep_in_memory=lowercase , remove_columns=train_dataset.column_names , ) lowerCamelCase_ = train_dataset.map( lowercase , batched=lowercase , batch_size=-1 , keep_in_memory=lowercase , remove_columns=eval_dataset.column_names , ) lowerCamelCase_ = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) ) lowerCamelCase_ = {v: k for k, v in enumerate(lowercase )} lowerCamelCase_ = vocab_dict[' '] del vocab_dict[" "] lowerCamelCase_ = len(lowercase ) lowerCamelCase_ = len(lowercase ) with open('vocab.json' , 'w' ) as vocab_file: json.dump(lowercase , lowercase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowerCamelCase_ = WavaVecaCTCTokenizer( 'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , ) lowerCamelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=lowercase , return_attention_mask=lowercase ) lowerCamelCase_ = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) lowerCamelCase_ = WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , ) if data_args.max_train_samples is not None: lowerCamelCase_ = min(len(lowercase ) , data_args.max_train_samples ) lowerCamelCase_ = train_dataset.select(range(lowercase ) ) if data_args.max_val_samples is not None: lowerCamelCase_ = eval_dataset.select(range(data_args.max_val_samples ) ) lowerCamelCase_ = torchaudio.transforms.Resample(4_80_00 , 1_60_00 ) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. def speech_file_to_array_fn(lowercase : Tuple ): lowerCamelCase_ , lowerCamelCase_ = torchaudio.load(batch['path'] ) lowerCamelCase_ = resampler(lowercase ).squeeze().numpy() lowerCamelCase_ = 1_60_00 lowerCamelCase_ = batch['text'] return batch lowerCamelCase_ = train_dataset.map( lowercase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) lowerCamelCase_ = eval_dataset.map( lowercase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(lowercase : Any ): # check that all files have the correct sampling rate assert ( len(set(batch['sampling_rate'] ) ) == 1 ), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.""" lowerCamelCase_ = processor( audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] ) batch.update(lowercase ) return batch lowerCamelCase_ = train_dataset.map( lowercase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowercase , num_proc=data_args.preprocessing_num_workers , ) lowerCamelCase_ = eval_dataset.map( lowercase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowercase , num_proc=data_args.preprocessing_num_workers , ) # Metric lowerCamelCase_ = datasets.load_metric('wer' ) def compute_metrics(lowercase : int ): lowerCamelCase_ = pred.predictions lowerCamelCase_ = np.argmax(lowercase , axis=-1 ) lowerCamelCase_ = processor.tokenizer.pad_token_id lowerCamelCase_ = processor.batch_decode(lowercase ) # we do not want to group tokens when computing the metrics lowerCamelCase_ = processor.batch_decode(pred.label_ids , group_tokens=lowercase ) lowerCamelCase_ = wer_metric.compute(predictions=lowercase , references=lowercase ) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator lowerCamelCase_ = DataCollatorCTCWithPadding(processor=lowercase , padding=lowercase ) # Initialize our Trainer lowerCamelCase_ = CTCTrainer( model=lowercase , 
data_collator=lowercase , args=lowercase , compute_metrics=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # Training if training_args.do_train: if last_checkpoint is not None: lowerCamelCase_ = last_checkpoint elif os.path.isdir(model_args.model_name_or_path ): lowerCamelCase_ = model_args.model_name_or_path else: lowerCamelCase_ = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank ): processor.save_pretrained(training_args.output_dir ) lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowercase ) trainer.save_model() lowerCamelCase_ = train_result.metrics lowerCamelCase_ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase ) ) lowerCamelCase_ = min(lowercase , len(lowercase ) ) trainer.log_metrics('train' , lowercase ) trainer.save_metrics('train' , lowercase ) trainer.save_state() # Evaluation lowerCamelCase_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) lowerCamelCase_ = trainer.evaluate() lowerCamelCase_ = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowercase ) lowerCamelCase_ = min(lowercase , len(lowercase ) ) trainer.log_metrics('eval' , lowercase ) trainer.save_metrics('eval' , lowercase ) return results if __name__ == "__main__": main()
204
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: lowerCamelCase : List[Any] = None lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} lowerCamelCase : Optional[int] = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } lowerCamelCase : List[Any] = { "google/fnet-base": 512, "google/fnet-large": 512, } lowerCamelCase : Any = "▁" class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''token_type_ids'''] UpperCamelCase = FNetTokenizer def __init__( self : Optional[int] , A_ : Any=None , A_ : int=None , A_ : int=False , A_ : Optional[int]=True , A_ : List[Any]=True , A_ : Tuple="<unk>" , A_ : Optional[int]="[SEP]" , A_ : List[Any]="<pad>" , A_ : Optional[int]="[CLS]" , A_ : Optional[Any]="[MASK]" , **A_ : Dict , ) -> List[str]: """simple docstring""" lowerCamelCase_ = ( AddedToken(A_ , lstrip=A_ , rstrip=A_ , normalized=A_ ) if isinstance(A_ , A_ ) else mask_token ) super().__init__( A_ , tokenizer_file=A_ , do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , **A_ , ) lowerCamelCase_ = do_lower_case lowerCamelCase_ = remove_space lowerCamelCase_ = keep_accents lowerCamelCase_ = vocab_file lowerCamelCase_ = False if not self.vocab_file else True def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self : Dict , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
204
1
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class lowerCamelCase__ ( _a ): _lowerCAmelCase = 42 class lowerCamelCase__ ( nn.Module ): def __init__( self : Union[str, Any] , _a : Tuple=3 , _a : Optional[Any]=3 , _a : List[Any]=("DownEncoderBlock2D",) , _a : Any=(6_4,) , _a : Dict=2 , _a : Optional[int]=3_2 , _a : Any="silu" , _a : List[Any]=True , ): super().__init__() a__: int =layers_per_block a__: int =torch.nn.Convad( _a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) a__: List[Any] =None a__: Optional[Any] =nn.ModuleList([] ) # down a__: str =block_out_channels[0] for i, down_block_type in enumerate(_a ): a__: Union[str, Any] =output_channel a__: Optional[int] =block_out_channels[i] a__: int =i == len(_a ) - 1 a__: Dict =get_down_block( _a , num_layers=self.layers_per_block , in_channels=_a , out_channels=_a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , ) self.down_blocks.append(_a ) # mid a__: List[str] =UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , ) # out a__: List[Any] =nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_a , eps=1e-6 ) a__: Dict =nn.SiLU() a__: List[Any] =2 * out_channels if double_z else out_channels a__: Tuple =nn.Convad(block_out_channels[-1] , _a , 3 , padding=1 ) a__: Union[str, Any] =False def _lowerCamelCase ( self : Union[str, Any] , _a : str ): a__: str =x a__: Tuple =self.conv_in(_a ) if self.training and self.gradient_checkpointing: def create_custom_forward(_a : Optional[int] ): def custom_forward(*_a : int ): return module(*_a ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: a__: Optional[int] =torch.utils.checkpoint.checkpoint( create_custom_forward(_a ) , _a , use_reentrant=_a ) # middle a__: Dict =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , use_reentrant=_a ) else: for down_block in self.down_blocks: a__: List[Any] =torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a ) # middle a__: Optional[int] =torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _a ) else: # down for down_block in self.down_blocks: a__: List[Any] =down_block(_a ) # middle a__: int =self.mid_block(_a ) # post-process a__: List[Any] =self.conv_norm_out(_a ) a__: str =self.conv_act(_a ) a__: Optional[int] =self.conv_out(_a ) return sample class lowerCamelCase__ ( nn.Module ): def __init__( self : List[Any] , _a : Dict=3 , _a : Optional[Any]=3 , _a : List[str]=("UpDecoderBlock2D",) , _a : Optional[int]=(6_4,) , _a : Any=2 , _a : Optional[int]=3_2 , _a : List[str]="silu" , _a : int="group" , ): super().__init__() a__: Optional[int] =layers_per_block a__: Union[str, Any] =nn.Convad( _a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) a__: Optional[int] =None a__: int =nn.ModuleList([] ) a__: Union[str, Any] =in_channels if norm_type == "spatial" else None # mid a__: Optional[Any] =UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , 
resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , ) # up a__: List[str] =list(reversed(_a ) ) a__: Union[str, Any] =reversed_block_out_channels[0] for i, up_block_type in enumerate(_a ): a__: Tuple =output_channel a__: int =reversed_block_out_channels[i] a__: Tuple =i == len(_a ) - 1 a__: List[Any] =get_up_block( _a , num_layers=self.layers_per_block + 1 , in_channels=_a , out_channels=_a , prev_output_channel=_a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , resnet_time_scale_shift=_a , ) self.up_blocks.append(_a ) a__: Union[str, Any] =output_channel # out if norm_type == "spatial": a__: List[Any] =SpatialNorm(block_out_channels[0] , _a ) else: a__: Optional[int] =nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_a , eps=1e-6 ) a__: Optional[int] =nn.SiLU() a__: Optional[int] =nn.Convad(block_out_channels[0] , _a , 3 , padding=1 ) a__: Tuple =False def _lowerCamelCase ( self : Union[str, Any] , _a : List[Any] , _a : Union[str, Any]=None ): a__: str =z a__: List[Any] =self.conv_in(_a ) a__: Optional[Any] =next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(_a : List[Any] ): def custom_forward(*_a : Tuple ): return module(*_a ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle a__: Union[str, Any] =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , _a , use_reentrant=_a ) a__: int =sample.to(_a ) # up for up_block in self.up_blocks: a__: int =torch.utils.checkpoint.checkpoint( create_custom_forward(_a ) , _a , _a , use_reentrant=_a ) else: # middle a__: Union[str, Any] =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , _a ) a__: Any =sample.to(_a ) # up for up_block in self.up_blocks: a__: Tuple =torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a , _a ) else: # middle a__: List[str] =self.mid_block(_a , _a ) a__: List[str] =sample.to(_a ) # up for up_block in self.up_blocks: a__: List[Any] =up_block(_a , _a ) # post-process if latent_embeds is None: a__: int =self.conv_norm_out(_a ) else: a__: List[str] =self.conv_norm_out(_a , _a ) a__: Optional[Any] =self.conv_act(_a ) a__: Optional[Any] =self.conv_out(_a ) return sample class lowerCamelCase__ ( nn.Module ): def __init__( self : Any , _a : List[Any] , _a : Union[str, Any] , _a : int , _a : List[Any]=None , _a : Dict="random" , _a : Optional[int]=False , _a : str=True ): super().__init__() a__: Any =n_e a__: Dict =vq_embed_dim a__: Optional[Any] =beta a__: Optional[int] =legacy a__: Dict =nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) a__: Union[str, Any] =remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) a__: Optional[Any] =self.used.shape[0] a__: Union[str, Any] =unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": a__: Any =self.re_embed a__: Tuple =self.re_embed + 1 print( F"Remapping {self.n_e} indices to {self.re_embed} indices. " F"Using {self.unknown_index} for unknown indices." 
) else: a__: int =n_e a__: Union[str, Any] =sane_index_shape def _lowerCamelCase ( self : Any , _a : Dict ): a__: List[Any] =inds.shape assert len(_a ) > 1 a__: str =inds.reshape(ishape[0] , -1 ) a__: Optional[int] =self.used.to(_a ) a__: List[str] =(inds[:, :, None] == used[None, None, ...]).long() a__: str =match.argmax(-1 ) a__: Optional[Any] =match.sum(2 ) < 1 if self.unknown_index == "random": a__: Any =torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: a__: List[str] =self.unknown_index return new.reshape(_a ) def _lowerCamelCase ( self : Tuple , _a : int ): a__: Union[str, Any] =inds.shape assert len(_a ) > 1 a__: int =inds.reshape(ishape[0] , -1 ) a__: Dict =self.used.to(_a ) if self.re_embed > self.used.shape[0]: # extra token a__: Optional[Any] =0 # simply set to zero a__: Optional[int] =torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _a ) return back.reshape(_a ) def _lowerCamelCase ( self : List[str] , _a : str ): # reshape z -> (batch, height, width, channel) and flatten a__: List[str] =z.permute(0 , 2 , 3 , 1 ).contiguous() a__: Optional[int] =z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z a__: Optional[Any] =torch.argmin(torch.cdist(_a , self.embedding.weight ) , dim=1 ) a__: Optional[Any] =self.embedding(_a ).view(z.shape ) a__: Optional[int] =None a__: List[str] =None # compute loss for embedding if not self.legacy: a__: str =self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: a__: Optional[int] =torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients a__: int =z + (z_q - z).detach() # reshape back to match original input shape a__: int =z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: a__: List[Any] =min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis a__: str =self.remap_to_used(_a ) a__: List[Any] =min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: a__: Optional[int] =min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _lowerCamelCase ( self : Any , _a : Optional[Any] , _a : int ): # shape specifying (batch, height, width, channel) if self.remap is not None: a__: Tuple =indices.reshape(shape[0] , -1 ) # add batch axis a__: List[Any] =self.unmap_to_all(_a ) a__: Dict =indices.reshape(-1 ) # flatten again # get quantized latent vectors a__: Optional[Any] =self.embedding(_a ) if shape is not None: a__: Optional[Any] =z_q.view(_a ) # reshape back to match original input shape a__: Optional[int] =z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class lowerCamelCase__ ( _a ): def __init__( self : Optional[Any] , _a : List[str] , _a : Union[str, Any]=False ): a__: Optional[int] =parameters a__: str =torch.chunk(_a , 2 , dim=1 ) a__: Union[str, Any] =torch.clamp(self.logvar , -3_0.0 , 2_0.0 ) a__: Optional[Any] =deterministic a__: List[Any] =torch.exp(0.5 * self.logvar ) a__: Any =torch.exp(self.logvar ) if self.deterministic: a__: Optional[Any] =torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def _lowerCamelCase ( self : Optional[Any] , _a : Optional[torch.Generator] = None ): # make sure sample is on the same device as the parameters and has same dtype a__: int =randn_tensor( self.mean.shape , generator=_a , device=self.parameters.device , dtype=self.parameters.dtype ) a__: Dict =self.mean + 
self.std * sample return x def _lowerCamelCase ( self : Optional[int] , _a : Optional[int]=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def _lowerCamelCase ( self : Union[str, Any] , _a : int , _a : Dict=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) a__: Optional[int] =np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_a ) def _lowerCamelCase ( self : str ): return self.mean
358
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __UpperCAmelCase = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __UpperCAmelCase = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __UpperCAmelCase = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : str ): a__: int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : str ): a__: Any =random.randint(0 , len(__magic_name__ ) - 1 ) a__: Tuple =parent_a[:random_slice] + parent_a[random_slice:] a__: List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : list[str] ): a__: str =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: a__: Union[str, Any] =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def __lowerCamelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ): a__: List[Any] =[] # Generate more children proportionally to the fitness score. a__: Dict =int(parent_a[1] * 100 ) + 1 a__: Tuple =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): a__: List[str] =population_score[random.randint(0 , __magic_name__ )][0] a__ , a__: Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: a__: Any =F"{N_POPULATION} must be bigger than {N_SELECTED}" raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. a__: int =sorted({c for c in target if c not in genes} ) if not_in_genes_list: a__: str =F"{not_in_genes_list} is not in genes list, evolution cannot converge" raise ValueError(__magic_name__ ) # Generate random starting population. a__: Tuple =[] for _ in range(__magic_name__ ): population.append("".join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. a__ , a__: Any =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. 
a__: Dict =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. a__: Any =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"\nGeneration: {generation}" F"\nTotal Population:{total_population}" F"\nBest score: {population_score[0][1]}" F"\nBest string: {population_score[0][0]}" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. a__: Optional[int] =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. a__: List[str] =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": __UpperCAmelCase = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) __UpperCAmelCase = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = basic(target_str, genes_list) print( f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
42
0
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class a ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : List[Any] = """M-CLIP""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : int=1024 , __SCREAMING_SNAKE_CASE : Dict=768 , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]: lowerCamelCase_ = transformerDimSize lowerCamelCase_ = imageDimSize super().__init__(**__SCREAMING_SNAKE_CASE ) class a ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: super().__init__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = XLMRobertaModel(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ) -> str: lowerCamelCase_ = self.transformer(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] lowerCamelCase_ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(__SCREAMING_SNAKE_CASE ), embs
183
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
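# A short instantiation check using only the classes above; the overridden
# value is arbitrary.
config = YolosConfig(num_detection_tokens=100)
print(config.model_type, config.hidden_size, config.image_size)  # yolos 768 [512, 864]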
15
0
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
353
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
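# A composition sketch built on the classmethod above; BERT configs are an
# arbitrary but valid choice, assuming the class is importable (e.g. as
# transformers.EncoderDecoderConfig).
from transformers import BertConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention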
96
0
'''simple docstring''' import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def a__ ( a__ ): """simple docstring""" return EnvironmentCommand() def a__ ( a__ ): """simple docstring""" return EnvironmentCommand(args.accelerate_config_file ) class lowerCAmelCase__ ( lowerCAmelCase__ ): """simple docstring""" @staticmethod def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = parser.add_parser("""env""" ) download_parser.set_defaults(func=__lowerCAmelCase ) download_parser.add_argument( """--accelerate-config_file""" , default=__lowerCAmelCase , help="""The accelerate config file to use for the default values in the launching script.""" , ) download_parser.set_defaults(func=__lowerCAmelCase ) def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = accelerate_config_file def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """not installed""" if is_safetensors_available(): import safetensors __SCREAMING_SNAKE_CASE = safetensors.__version__ elif importlib.util.find_spec("""safetensors""" ) is not None: import safetensors __SCREAMING_SNAKE_CASE = f'{safetensors.__version__} but is ignored because of PyTorch version too old.' __SCREAMING_SNAKE_CASE = """not installed""" __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = """not found""" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __SCREAMING_SNAKE_CASE = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(__lowerCAmelCase ): __SCREAMING_SNAKE_CASE = load_config_from_file(self._accelerate_config_file ).to_dict() __SCREAMING_SNAKE_CASE = ( """\n""".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else f'\t{accelerate_config}' ) __SCREAMING_SNAKE_CASE = """not installed""" __SCREAMING_SNAKE_CASE = """NA""" if is_torch_available(): import torch __SCREAMING_SNAKE_CASE = torch.__version__ __SCREAMING_SNAKE_CASE = torch.cuda.is_available() __SCREAMING_SNAKE_CASE = """not installed""" __SCREAMING_SNAKE_CASE = """NA""" if is_tf_available(): import tensorflow as tf __SCREAMING_SNAKE_CASE = tf.__version__ try: # deprecated in v2.1 __SCREAMING_SNAKE_CASE = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __SCREAMING_SNAKE_CASE = bool(tf.config.list_physical_devices("""GPU""" ) ) __SCREAMING_SNAKE_CASE = """not installed""" __SCREAMING_SNAKE_CASE = """not installed""" __SCREAMING_SNAKE_CASE = """not installed""" __SCREAMING_SNAKE_CASE = """NA""" if is_flax_available(): import flax import jax import jaxlib __SCREAMING_SNAKE_CASE = flax.__version__ __SCREAMING_SNAKE_CASE = jax.__version__ __SCREAMING_SNAKE_CASE = jaxlib.__version__ __SCREAMING_SNAKE_CASE = jax.lib.xla_bridge.get_backend().platform __SCREAMING_SNAKE_CASE = { """`transformers` version""": version, """Platform""": platform.platform(), """Python version""": platform.python_version(), """Huggingface_hub version""": huggingface_hub.__version__, """Safetensors version""": f'{safetensors_version}', """Accelerate version""": f'{accelerate_version}', """Accelerate config""": f'{accelerate_config_str}', """PyTorch version (GPU?)""": f'{pt_version} ({pt_cuda_available})', """Tensorflow version (GPU?)""": f'{tf_version} ({tf_cuda_available})', """Flax version (CPU?/GPU?/TPU?)""": f'{flax_version} ({jax_backend})', """Jax version""": f'{jax_version}', """JaxLib version""": f'{jaxlib_version}', """Using GPU in script?""": """<fill in>""", """Using distributed or parallel set-up in script?""": """<fill in>""", } print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" ) print(self.format_dict(__lowerCAmelCase ) ) return info @staticmethod def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Dict ) -> int: """simple docstring""" return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
267
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = XLMRobertaTokenizer UpperCAmelCase_ : int = XLMRobertaTokenizerFast UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Optional[int] = True def a_ ( self): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = """<pad>""" lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(__lowerCAmelCase) , 1002) def a_ ( self): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002) def a_ ( self): """simple docstring""" lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) lowerCAmelCase = tokenizer.tokenize("""This is a test""") self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def a_ ( self): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase = 
(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f) self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=True lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=False lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) @cached_property def a_ ( self): """simple docstring""" return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""") def a_ ( self): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__lowerCAmelCase , f.name) lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase) lowerCAmelCase = pickle.dumps(__lowerCAmelCase) pickle.loads(__lowerCAmelCase) def a_ ( self): """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase) lowerCAmelCase 
= rust_tokenizer.tokenize(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = """Hello World!""" lowerCAmelCase = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCAmelCase = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
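# A minimal parity check in the spirit of the test suite above (a sketch, not
# part of the original file): it assumes `transformers` is installed and the
# "xlm-roberta-base" checkpoint is reachable; the slow (SentencePiece) and
# fast (Rust) tokenizers should agree on tokens and ids.
from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast

slow_tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
fast_tok = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")

sample = "I was born in 92000, and this is falsé."
assert slow_tok.tokenize(sample) == fast_tok.tokenize(sample)
assert slow_tok.encode(sample) == fast_tok.encode(sample)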
272
0
"""simple docstring""" lowercase__ = { """km/h""": 1.0, """m/s""": 3.6, """mph""": 1.609344, """knot""": 1.852, } lowercase__ = { """km/h""": 1.0, """m/s""": 0.277777778, """mph""": 0.621371192, """knot""": 0.539956803, } def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if unit_to not in speed_chart or unit_from not in speed_chart_inverse: _lowerCamelCase : Optional[Any] = ( f'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n''' f'''Valid values are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
367
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
12
0
'''simple docstring'''


def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    # Row is the outer two bits, column the middle two bits.
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
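# Worked example of the S-box lookup above: for the 4-bit block "1011" the
# row is the outer bits "1" + "1" -> 3 and the column is the middle bits
# "01" -> 1, so s0[3][1] = 1 and the boxed output is "1".
assert apply_sbox([[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]], "1011") == "1"
assert xor("1010", "0110") == "1100"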
265
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
265
1
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ = 1 , lowercase__ = 1 , lowercase__ = 1.0e4 , lowercase__ = False , lowercase__ = 1.0 , ): """simple docstring""" assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even""" A = float(embedding_dim // 2 ) A = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) A = min_timescale * jnp.exp(jnp.arange(lowercase__ , dtype=jnp.floataa ) * -log_timescale_increment ) A = jnp.expand_dims(lowercase__ , 1 ) * jnp.expand_dims(lowercase__ , 0 ) # scale embeddings A = scale * emb if flip_sin_to_cos: A = jnp.concatenate([jnp.cos(lowercase__ ), jnp.sin(lowercase__ )] , axis=1 ) else: A = jnp.concatenate([jnp.sin(lowercase__ ), jnp.cos(lowercase__ )] , axis=1 ) A = jnp.reshape(lowercase__ , [jnp.shape(lowercase__ )[0], embedding_dim] ) return signal class __UpperCamelCase ( nn.Module ): SCREAMING_SNAKE_CASE = 32 SCREAMING_SNAKE_CASE = jnp.floataa @nn.compact def __call__(self : Tuple , __SCREAMING_SNAKE_CASE : int): A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1")(__SCREAMING_SNAKE_CASE) A = nn.silu(__SCREAMING_SNAKE_CASE) A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2")(__SCREAMING_SNAKE_CASE) return temb class __UpperCamelCase ( nn.Module ): SCREAMING_SNAKE_CASE = 32 SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = 1 @nn.compact def __call__(self : Optional[int] , __SCREAMING_SNAKE_CASE : str): return get_sinusoidal_embeddings( __SCREAMING_SNAKE_CASE , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
57
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __A : int = logging.get_logger(__name__) __A : Optional[int] = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] __A : Union[str, Any] = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def __SCREAMING_SNAKE_CASE ( lowercase__ ): """simple docstring""" A = torch.load(lowercase__ , map_location="cpu" ) return sd def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__=rename_keys_prefix ): """simple docstring""" A = OrderedDict() A = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A = key for name_pair in rename_keys_prefix: A = new_key.replace(name_pair[0] , name_pair[1] ) A = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: A = "pretraining" if "vcr" in checkpoint_path: A = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: A = {"visual_embedding_dim": 2_048} elif "vqa" in checkpoint_path: A = {"visual_embedding_dim": 2_048} elif "nlvr" in checkpoint_path: A = {"visual_embedding_dim": 1_024} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: A = {"visual_embedding_dim": 512} A = "multichoice" elif "vqa_advanced" in checkpoint_path: A = {"visual_embedding_dim": 2_048} A = "vqa_advanced" elif "vqa" in checkpoint_path: A = {"visual_embedding_dim": 2_048, "num_labels": 3_129} A = "vqa" elif "nlvr" in checkpoint_path: A = { "visual_embedding_dim": 1_024, "num_labels": 2, } A = "nlvr" A = VisualBertConfig(**lowercase__ ) # Load State Dict A = load_state_dict(lowercase__ ) A = get_new_dict(lowercase__ , lowercase__ ) if model_type == "pretraining": A = VisualBertForPreTraining(lowercase__ ) elif model_type == "vqa": A = VisualBertForQuestionAnswering(lowercase__ ) elif model_type == "nlvr": A = VisualBertForVisualReasoning(lowercase__ ) elif model_type == "multichoice": A = VisualBertForMultipleChoice(lowercase__ ) model.load_state_dict(lowercase__ ) # Save Checkpoints Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) model.save_pretrained(lowercase__ ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') __A : Any = 
parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
57
1
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL a =logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple[int, int]: def constraint_to_multiple_of(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 , lowerCamelCase__=None ): __lowerCamelCase : Dict = round(val / multiple ) * multiple if max_val is not None and x > max_val: __lowerCamelCase : Dict = math.floor(val / multiple ) * multiple if x < min_val: __lowerCamelCase : Optional[int] = math.ceil(val / multiple ) * multiple return x __lowerCamelCase : Tuple = (output_size, output_size) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else output_size __lowerCamelCase , __lowerCamelCase : Optional[Any] = get_image_size(lowerCamelCase__ ) __lowerCamelCase , __lowerCamelCase : Tuple = output_size # determine new height and width __lowerCamelCase : Any = output_height / input_height __lowerCamelCase : int = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width __lowerCamelCase : str = scale_width else: # fit height __lowerCamelCase : Optional[int] = scale_height __lowerCamelCase : Union[str, Any] = constraint_to_multiple_of(scale_height * input_height , multiple=lowerCamelCase__ ) __lowerCamelCase : Any = constraint_to_multiple_of(scale_width * input_width , multiple=lowerCamelCase__ ) return (new_height, new_width) class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : str = ['''pixel_values'''] def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,**SCREAMING_SNAKE_CASE__ : List[str] ,): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = size if size is not None else {'height': 3_8_4, 'width': 3_8_4} __lowerCamelCase : str = get_size_dict(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = do_resize __lowerCamelCase : Optional[Any] = size __lowerCamelCase : Optional[Any] = keep_aspect_ratio __lowerCamelCase : List[Any] = ensure_multiple_of __lowerCamelCase : Any = resample __lowerCamelCase : Union[str, Any] = do_rescale __lowerCamelCase : Tuple = rescale_factor __lowerCamelCase : List[Any] = do_normalize __lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowerCamelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : 
Dict[str, int] ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Any ,): __lowerCamelCase : Tuple = get_size_dict(SCREAMING_SNAKE_CASE__) if "height" not in size or "width" not in size: raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}") __lowerCamelCase : Tuple = get_resize_output_image_size( SCREAMING_SNAKE_CASE__ ,output_size=(size['height'], size['width']) ,keep_aspect_ratio=SCREAMING_SNAKE_CASE__ ,multiple=SCREAMING_SNAKE_CASE__ ,) return resize(SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[int, float] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,): return rescale(SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : str ,): return normalize(SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : ImageInput ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,): __lowerCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize __lowerCamelCase : Tuple = size if size is not None else self.size __lowerCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio __lowerCamelCase : Dict = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of __lowerCamelCase : Optional[int] = resample if resample is not None else self.resample __lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase : int = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase : List[Any] = image_mean if image_mean is not None else self.image_mean __lowerCamelCase : Dict = image_std if image_std is not None else self.image_std __lowerCamelCase : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE__) if not valid_images(SCREAMING_SNAKE_CASE__): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __lowerCamelCase : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE__) for image in images] if do_resize: __lowerCamelCase : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__) for image in images] if do_rescale: __lowerCamelCase : Union[str, Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__) for image in images] if do_normalize: __lowerCamelCase : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__) for image in images] __lowerCamelCase : Optional[int] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images] __lowerCamelCase : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[Tuple] = None): __lowerCamelCase : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(SCREAMING_SNAKE_CASE__) != len(SCREAMING_SNAKE_CASE__): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits') if is_torch_tensor(SCREAMING_SNAKE_CASE__): __lowerCamelCase : List[str] = target_sizes.numpy() __lowerCamelCase : Any = [] for idx in range(len(SCREAMING_SNAKE_CASE__)): __lowerCamelCase : Tuple = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = resized_logits[0].argmax(dim=0) semantic_segmentation.append(SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : List[Any] = logits.argmax(dim=1) __lowerCamelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
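# Worked example of the sizing rule above (a sketch; the helper name follows
# the call made inside `resize`): shrinking a channels-first 480x640 image
# toward 384x384 with keep_aspect_ratio=True keeps the rescale factor
# closest to 1 (here 384/480 = 0.8), then snaps each side to a multiple of
# 32: 0.8 * 480 = 384 and 0.8 * 640 = 512.
import numpy as np

out = get_resize_output_image_size(
    np.zeros((3, 480, 640)),
    output_size=(384, 384),
    keep_aspect_ratio=True,
    multiple=32,
)
assert out == (384, 512)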
73
from math import log2


def binary_count_trailing_zeros(a: int) -> int:
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    # a & -a isolates the lowest set bit; its log2 is the count of
    # trailing zeros in the binary representation.
    return 0 if (a == 0) else int(log2(a & -a))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
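# Worked example: 36 = 0b100100, so 36 & -36 isolates the lowest set bit
# (0b100 = 4) and log2(4) = 2 trailing zeros.
assert binary_count_trailing_zeros(36) == 2
assert binary_count_trailing_zeros(8) == 3
assert binary_count_trailing_zeros(0) == 0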
24
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str = BlenderbotConfig _UpperCAmelCase :Dict = {} _UpperCAmelCase :Tuple = "gelu" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ): lowercase__: Optional[Any] = parent lowercase__: Any = batch_size lowercase__: Optional[Any] = seq_length lowercase__: Optional[int] = is_training lowercase__: Optional[Any] = use_labels lowercase__: List[Any] = vocab_size lowercase__: Tuple = hidden_size lowercase__: str = num_hidden_layers lowercase__: str = num_attention_heads lowercase__: Dict = intermediate_size lowercase__: Dict = hidden_dropout_prob lowercase__: Any = attention_probs_dropout_prob lowercase__: Any = max_position_embeddings lowercase__: str = eos_token_id lowercase__: List[str] = pad_token_id lowercase__: List[Any] = bos_token_id def _snake_case ( self ): lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__: Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__: Tuple = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__: List[str] = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[Any] = TFBlenderbotModel(config=_UpperCAmelCase ).get_decoder() lowercase__: Dict = inputs_dict['''input_ids'''] lowercase__: List[Any] = input_ids[:1, :] lowercase__: int = inputs_dict['''attention_mask'''][:1, :] lowercase__: Union[str, Any] = inputs_dict['''head_mask'''] lowercase__: List[Any] = 1 # first forward pass lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) lowercase__: Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids 
lowercase__: Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__: Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__: Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__: str = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__: str = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__: List[Any] = output_from_no_past[:, -3:, random_slice_idx] lowercase__: int = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> List[str]: if attention_mask is None: lowercase__: Union[str, Any] = tf.cast(tf.math.not_equal(__UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase__: Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase__: int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__: Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase__: Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :str = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () _UpperCAmelCase :Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () _UpperCAmelCase :Optional[Any] = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) _UpperCAmelCase :List[str] = True _UpperCAmelCase :Dict = False _UpperCAmelCase :Optional[Any] = False def _snake_case ( self ): lowercase__: Optional[Any] = TFBlenderbotModelTester(self ) lowercase__: Any = ConfigTester(self , config_class=_UpperCAmelCase ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_tokenizers @require_tf class UpperCAmelCase (unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = ["My friends are cool but they eat too many carbs."] _UpperCAmelCase :str = 
"facebook/blenderbot-400M-distill" @cached_property def _snake_case ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def _snake_case ( self ): lowercase__: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def _snake_case ( self ): lowercase__: List[str] = self.tokenizer(self.src_text , return_tensors='''tf''' ) lowercase__: Union[str, Any] = self.model.generate( model_inputs.input_ids , ) lowercase__: Any = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
354
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int: lowercase__: str = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
2
0
'''simple docstring''' import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class UpperCAmelCase_ ( nn.Module ): def __init__( self : Dict ) -> List[str]: super().__init__() lowerCAmelCase = nn.Linear(3 , 4 ) lowerCAmelCase = nn.BatchNormad(4 ) lowerCAmelCase = nn.Linear(4 , 5 ) def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Tuple ) -> Any: return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) ) class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : Any ) -> Any: lowerCAmelCase = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCAmelCase__ , model.state_dict() ) lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'index.json' ) self.assertTrue(os.path.isfile(UpperCAmelCase__ ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: lowerCAmelCase = os.path.join(UpperCAmelCase__ , F'''{key}.dat''' ) self.assertTrue(os.path.isfile(UpperCAmelCase__ ) ) # TODO: add tests on the fact weights are properly loaded def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple: lowerCAmelCase = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: lowerCAmelCase = torch.randn(2 , 3 , dtype=UpperCAmelCase__ ) with TemporaryDirectory() as tmp_dir: lowerCAmelCase = offload_weight(UpperCAmelCase__ , 'weight' , UpperCAmelCase__ , {} ) lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'weight.dat' ) self.assertTrue(os.path.isfile(UpperCAmelCase__ ) ) self.assertDictEqual(UpperCAmelCase__ , {'weight': {'shape': [2, 3], 'dtype': str(UpperCAmelCase__ ).split('.' 
)[1]}} ) lowerCAmelCase = load_offloaded_weight(UpperCAmelCase__ , index['weight'] ) self.assertTrue(torch.equal(UpperCAmelCase__ , UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : Tuple ) -> Any: lowerCAmelCase = ModelForTest() lowerCAmelCase = model.state_dict() lowerCAmelCase = {k: v for k, v in state_dict.items() if 'linear2' not in k} lowerCAmelCase = {k: v for k, v in state_dict.items() if 'linear2' in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = OffloadedWeightsLoader(state_dict=UpperCAmelCase__ , save_folder=UpperCAmelCase__ ) # Every key is there with the right value self.assertEqual(sorted(UpperCAmelCase__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCAmelCase__ , weight_map[key] ) ) lowerCAmelCase = {k: v for k, v in state_dict.items() if 'weight' in k} lowerCAmelCase = {k: v for k, v in state_dict.items() if 'weight' not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = OffloadedWeightsLoader(state_dict=UpperCAmelCase__ , save_folder=UpperCAmelCase__ ) # Every key is there with the right value self.assertEqual(sorted(UpperCAmelCase__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCAmelCase__ , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCAmelCase__ , UpperCAmelCase__ ) # Duplicates are removed lowerCAmelCase = OffloadedWeightsLoader(state_dict=UpperCAmelCase__ , save_folder=UpperCAmelCase__ ) # Every key is there with the right value self.assertEqual(sorted(UpperCAmelCase__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCAmelCase__ , weight_map[key] ) ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: lowerCAmelCase = {'a.1': 0, 'a.10': 1, 'a.2': 2} lowerCAmelCase = extract_submodules_state_dict(UpperCAmelCase__ , ['a.1', 'a.2'] ) self.assertDictEqual(UpperCAmelCase__ , {'a.1': 0, 'a.2': 2} ) lowerCAmelCase = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2} lowerCAmelCase = extract_submodules_state_dict(UpperCAmelCase__ , ['a.1', 'a.2'] ) self.assertDictEqual(UpperCAmelCase__ , {'a.1.a': 0, 'a.2.a': 2} )
4
'''simple docstring''' # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def a_ ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=0 ): # Format the message. if name is None: lowerCAmelCase = None else: lowerCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}' lowerCAmelCase = fmt.format(lowerCamelCase ) # Print and recurse (if needed). if isinstance(lowerCamelCase , lowerCamelCase ): if msg is not None: print(lowerCamelCase ) for k in val.keys(): recursive_print(lowerCamelCase , val[k] , spaces + 2 ) elif isinstance(lowerCamelCase , torch.Tensor ): print(lowerCamelCase , ':' , val.size() ) else: print(lowerCamelCase , ':' , lowerCamelCase ) def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple ): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. 
lowerCAmelCase = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:] lowerCAmelCase = param.view(*lowerCamelCase ) lowerCAmelCase = param.transpose(0 , 2 ) lowerCAmelCase = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:] lowerCAmelCase = param.view(*lowerCamelCase ) lowerCAmelCase = param.transpose(0 , 1 ).contiguous() lowerCAmelCase = param.view(*lowerCamelCase ) return param def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : str ): # The converted output model. lowerCAmelCase = {} # old versions did not store training args lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) lowerCAmelCase = ds_args.padded_vocab_size lowerCAmelCase = ds_args.max_position_embeddings lowerCAmelCase = ds_args.hidden_size lowerCAmelCase = ds_args.num_layers lowerCAmelCase = ds_args.num_attention_heads lowerCAmelCase = ds_args.ffn_hidden_size # pprint(config) # The number of heads. lowerCAmelCase = config.n_head # The hidden_size per head. lowerCAmelCase = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): lowerCAmelCase = input_state_dict['checkpoint_version'] else: lowerCAmelCase = 0.0 # The model. lowerCAmelCase = input_state_dict['model'] # The language model. lowerCAmelCase = model['language_model'] # The embeddings. lowerCAmelCase = lm['embedding'] # The word embeddings. lowerCAmelCase = embeddings['word_embeddings']['weight'] # Truncate the embedding table to vocab_size rows. lowerCAmelCase = word_embeddings[: config.vocab_size, :] lowerCAmelCase = word_embeddings # The position embeddings. lowerCAmelCase = embeddings['position_embeddings']['weight'] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] lowerCAmelCase = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' ) # Store the position embeddings. lowerCAmelCase = pos_embeddings # The transformer. lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder'] # The regex to extract layer names. lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' ) # The simple map of names for "automated" rules. lowerCAmelCase = { 'attention.dense': '.attn.c_proj.', 'self_attention.dense': '.attn.c_proj.', 'mlp.dense_h_to_4h': '.mlp.c_fc.', 'mlp.dense_4h_to_h': '.mlp.c_proj.', } # Extract the layers. for key, val in transformer.items(): # Match the name. lowerCAmelCase = layer_re.match(lowerCamelCase ) # Stop if that's not a layer if m is None: break # The index of the layer. lowerCAmelCase = int(m.group(1 ) ) # The name of the operation. lowerCAmelCase = m.group(2 ) # Is it a weight or a bias? lowerCAmelCase = m.group(3 ) # The name of the layer. lowerCAmelCase = f'''transformer.h.{layer_idx}''' # For layernorm(s), simply store the layer norm. 
if op_name.endswith('layernorm' ): lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2' lowerCAmelCase = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , lowerCamelCase , lowerCamelCase ) lowerCAmelCase = causal_mask # Insert a "dummy" tensor for masked_bias. lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa ) lowerCAmelCase = masked_bias lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous() # Store. lowerCAmelCase = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase ) # Store. No change of shape. lowerCAmelCase = out_val # Transpose the weights. elif weight_or_bias == "weight": lowerCAmelCase = megatron_to_transformers[op_name] lowerCAmelCase = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": lowerCAmelCase = megatron_to_transformers[op_name] lowerCAmelCase = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. lowerCAmelCase = transformer['final_layernorm.weight'] lowerCAmelCase = transformer['final_layernorm.bias'] # For LM head, transformers' wants the matrix to weight embeddings. lowerCAmelCase = word_embeddings # It should be done! return output_state_dict def a_ ( ): # Create the argument parser. lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('--print-checkpoint-structure' , action='store_true' ) parser.add_argument( 'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , ) parser.add_argument( '--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , ) lowerCAmelCase = parser.parse_args() # Extract the basename. lowerCAmelCase = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' ) if args.path_to_checkpoint.endswith('.zip' ): with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint: with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict: lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' ) else: lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' ) lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: lowerCAmelCase = 'gelu_fast' elif ds_args.openai_gelu: lowerCAmelCase = 'gelu_new' else: lowerCAmelCase = 'gelu' else: # in the very early days this used to be "gelu_new" lowerCAmelCase = 'gelu_new' # Spell out all parameters in case the defaults change. 
lowerCAmelCase = GPTaConfig( vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , ) else: lowerCAmelCase = GPTaConfig.from_json_file(args.config_file ) lowerCAmelCase = ['GPT2LMHeadModel'] # Convert. print('Converting' ) lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(lowerCamelCase , lowerCamelCase ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: lowerCAmelCase = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": lowerCAmelCase = 'gpt2' elif tokenizer_type == "PretrainedFromHF": lowerCAmelCase = ds_args.tokenizer_name_or_path else: raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' ) else: lowerCAmelCase = 'gpt2' lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase ) lowerCAmelCase = type(lowerCamelCase ).__name__ lowerCAmelCase = tokenizer_class # Store the config to file. print('Saving config' ) config.save_pretrained(lowerCamelCase ) # Save tokenizer based on args print(f'''Adding {tokenizer_class} tokenizer files''' ) tokenizer.save_pretrained(lowerCamelCase ) # Store the state_dict to file. lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' ) print(f'''Saving checkpoint to "{output_checkpoint_file}"''' ) torch.save(lowerCamelCase , lowerCamelCase ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
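# Shape sketch of the checkpoint-version >= 2.0 branch of
# fix_query_key_value_ordering above: Megatron stores the fused QKV weight
# as [num_heads * num_splits * hidden_per_head, :] and the helper permutes
# it to [num_splits * num_heads * hidden_per_head, :] without changing the
# overall shape (dummy sizes below are illustrative only).
import torch

num_heads, num_splits, hidden = 2, 3, 4
param = torch.arange(num_heads * num_splits * hidden * 5).view(-1, 5).float()
out = (
    param.view(num_heads, num_splits, hidden, 5)
    .transpose(0, 1)
    .contiguous()
    .view(num_heads * num_splits * hidden, 5)
)
assert out.shape == param.shape and not torch.equal(out, param)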
4
1
"""simple docstring""" _SCREAMING_SNAKE_CASE = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/""" def SCREAMING_SNAKE_CASE__ ( __a ): # Make sure the supplied data is a bytes-like object if not isinstance(__a , __a ): snake_case_ : Union[str, Any] = f"""a bytes-like object is required, not '{data.__class__.__name__}'""" raise TypeError(__a ) snake_case_ : Dict = ''.join(bin(__a )[2:].zfill(8 ) for byte in data ) snake_case_ : List[Any] = len(__a ) % 6 != 0 if padding_needed: # The padding that will be added later snake_case_ : List[str] = b'=' * ((6 - len(__a ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(__a ) % 6) else: snake_case_ : Any = b'' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(__a ) , 6 ) ).encode() + padding ) def SCREAMING_SNAKE_CASE__ ( __a ): # Make sure encoded_data is either a string or a bytes-like object if not isinstance(__a , __a ) and not isinstance(__a , __a ): snake_case_ : Tuple = ( 'argument should be a bytes-like object or ASCII string, ' f"""not '{encoded_data.__class__.__name__}'""" ) raise TypeError(__a ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(__a , __a ): try: snake_case_ : Union[str, Any] = encoded_data.decode('utf-8' ) except UnicodeDecodeError: raise ValueError('base64 encoded data should only contain ASCII characters' ) snake_case_ : Union[str, Any] = encoded_data.count('=' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(__a ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one snake_case_ : Optional[int] = encoded_data[:-padding] snake_case_ : Tuple = ''.join( bin(B64_CHARSET.index(__a ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: snake_case_ : List[str] = ''.join( bin(B64_CHARSET.index(__a ) )[2:].zfill(6 ) for char in encoded_data ) snake_case_ : int = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(__a ) , 8 ) ] return bytes(__a ) if __name__ == "__main__": import doctest doctest.testmod()
355
import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = """ Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. """ _SCREAMING_SNAKE_CASE = """ Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 25.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 50.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 75.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results[\"exact_match\"], 1)) 100.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"] >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 33.3 """ _SCREAMING_SNAKE_CASE = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , 
id='sequence' ), } ) , reference_urls=[] , ) def UpperCAmelCase_ ( self : List[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int]=None , _A : Dict=False , _A : Dict=False , _A : Optional[Any]=False , ) -> List[str]: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: snake_case_ : List[str] = np.array([re.sub(_A , '' , _A ) for x in predictions] ) snake_case_ : int = np.array([re.sub(_A , '' , _A ) for x in references] ) else: snake_case_ : Optional[Any] = np.asarray(_A ) snake_case_ : Optional[Any] = np.asarray(_A ) if ignore_case: snake_case_ : int = np.char.lower(_A ) snake_case_ : List[str] = np.char.lower(_A ) if ignore_punctuation: snake_case_ : str = string.punctuation.maketrans('' , '' , string.punctuation ) snake_case_ : str = np.char.translate(_A , table=_A ) snake_case_ : Any = np.char.translate(_A , table=_A ) if ignore_numbers: snake_case_ : int = string.digits.maketrans('' , '' , string.digits ) snake_case_ : Tuple = np.char.translate(_A , table=_A ) snake_case_ : Optional[Any] = np.char.translate(_A , table=_A ) snake_case_ : Optional[Any] = predictions == references return {"exact_match": np.mean(_A ) * 100}
88
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
311
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowercase : str = logging.get_logger(__name__) lowercase : Union[str, Any] = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A , __A ) -> Dict: for attribute in key.split('.' ): _snake_case = getattr(__A , __A ) if weight_type is not None: _snake_case = getattr(__A , __A ).shape else: _snake_case = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": _snake_case = value elif weight_type == "weight_g": _snake_case = value elif weight_type == "weight_v": _snake_case = value elif weight_type == "bias": _snake_case = value else: _snake_case = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Any: _snake_case = [] _snake_case = fairseq_model.state_dict() _snake_case = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _snake_case = False if "conv_layers" in name: load_conv_layer( __A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , ) _snake_case = True else: for key, mapped_key in MAPPING.items(): _snake_case = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: _snake_case = True if "*" in mapped_key: _snake_case = name.split(__A )[0].split('.' )[-2] _snake_case = mapped_key.replace('*' , __A ) if "weight_g" in name: _snake_case = 'weight_g' elif "weight_v" in name: _snake_case = 'weight_v' elif "weight" in name: _snake_case = 'weight' elif "bias" in name: _snake_case = 'bias' else: _snake_case = None set_recursively(__A , __A , __A , __A , __A ) continue if not is_used: unused_weights.append(__A ) logger.warning(F'Unused weights: {unused_weights}' ) def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A , __A ) -> int: _snake_case = full_name.split('conv_layers.' )[-1] _snake_case = name.split('.' ) _snake_case = int(items[0] ) _snake_case = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' 
) _snake_case = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _snake_case = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) _snake_case = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) _snake_case = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(__A ) def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str: _snake_case = SEWConfig() if is_finetuned: _snake_case = model.wav_encoder.wav_model.cfg else: _snake_case = model.cfg _snake_case = fs_config.conv_bias _snake_case = eval(fs_config.conv_feature_layers ) _snake_case = [x[0] for x in conv_layers] _snake_case = [x[1] for x in conv_layers] _snake_case = [x[2] for x in conv_layers] _snake_case = 'gelu' _snake_case = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group' _snake_case = 0.0 _snake_case = fs_config.activation_fn.name _snake_case = fs_config.encoder_embed_dim _snake_case = 0.0_2 _snake_case = fs_config.encoder_ffn_embed_dim _snake_case = 1e-5 _snake_case = fs_config.encoder_layerdrop _snake_case = fs_config.encoder_attention_heads _snake_case = fs_config.conv_pos_groups _snake_case = fs_config.conv_pos _snake_case = len(__A ) _snake_case = fs_config.encoder_layers _snake_case = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _snake_case = model.cfg _snake_case = fs_config.final_dropout _snake_case = fs_config.layerdrop _snake_case = fs_config.activation_dropout _snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _snake_case = fs_config.attention_dropout _snake_case = fs_config.dropout_input _snake_case = fs_config.dropout _snake_case = fs_config.mask_channel_length _snake_case = fs_config.mask_channel_prob _snake_case = fs_config.mask_length _snake_case = fs_config.mask_prob _snake_case = 'Wav2Vec2FeatureExtractor' _snake_case = 'Wav2Vec2CTCTokenizer' return config @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( __A , __A , __A=None , __A=None , __A=True ) -> List[str]: if is_finetuned: _snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _snake_case = SEWConfig.from_pretrained(__A ) else: _snake_case = convert_config(model[0] , __A ) _snake_case = model[0].eval() _snake_case = True if config.feat_extract_norm == 'layer' else False _snake_case = 
WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , ) if is_finetuned: if dict_path: _snake_case = Dictionary.load(__A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _snake_case = target_dict.pad_index _snake_case = target_dict.bos_index _snake_case = target_dict.pad_index _snake_case = target_dict.bos_index _snake_case = target_dict.eos_index _snake_case = len(target_dict.symbols ) _snake_case = os.path.join(__A , 'vocab.json' ) if not os.path.isdir(__A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__A ) ) return os.makedirs(__A , exist_ok=__A ) with open(__A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , __A ) _snake_case = WavaVecaCTCTokenizer( __A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__A , ) _snake_case = WavaVecaProcessor(feature_extractor=__A , tokenizer=__A ) processor.save_pretrained(__A ) _snake_case = SEWForCTC(__A ) else: _snake_case = SEWModel(__A ) feature_extractor.save_pretrained(__A ) recursively_load_weights(__A , __A , __A ) hf_model.save_pretrained(__A ) if __name__ == "__main__": lowercase : int = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowercase : Union[str, Any] = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
42
0
"""simple docstring""" def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ): return [sentence[i : i + ngram_size] for i in range(len(lowerCAmelCase__ ) - ngram_size + 1 )] if __name__ == "__main__": from doctest import testmod testmod()
350
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = '''roberta''' def __init__( self : int , _UpperCAmelCase : List[Any]=50265 , _UpperCAmelCase : str=768 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Tuple=3072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Optional[Any]=1e-12 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple="absolute" , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=None , **_UpperCAmelCase : List[str] , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = position_embedding_type UpperCAmelCase_ = use_cache UpperCAmelCase_ = classifier_dropout class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
241
0
import numpy as np


class Cell:
    """A cell in the world, with a position, a parent link, and the g/h/f
    values used by the A* heuristic."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of a cell (8-connectivity)."""
        neighbour_cords = [
            (-1, -1), (-1, 0), (-1, 1),
            (0, -1), (0, 1),
            (1, -1), (1, 0), (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared Euclidean distance to the goal as the heuristic.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
38
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[str] = XGLMTokenizer __magic_name__ :Any = XGLMTokenizerFast __magic_name__ :Dict = True __magic_name__ :Union[str, Any] = True def snake_case ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ :int = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = '<pad>' lowerCAmelCase__ :int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase__ :int = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase__ :Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) lowerCAmelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def snake_case ( self ): '''simple docstring''' return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def snake_case ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__UpperCAmelCase , f.name ) lowerCAmelCase__ :Dict = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = pickle.dumps(__UpperCAmelCase ) pickle.loads(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ :Optional[Any] = self.get_tokenizer() lowerCAmelCase__ :List[str] = self.get_rust_tokenizer() lowerCAmelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.' lowerCAmelCase__ :Dict = tokenizer.tokenize(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = self.get_rust_tokenizer() lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = 'Hello World!' lowerCAmelCase__ :Tuple = [2, 3_1_2_2_7, 4_4_4_7, 3_5] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowerCAmelCase__ :List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5] # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = { 'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='facebook/xglm-564M' , padding=__UpperCAmelCase , )
293
0
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()

    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
222
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class __SCREAMING_SNAKE_CASE : snake_case_ = PegasusConfig snake_case_ = {} snake_case_ = """gelu""" def __init__( self : int , __lowercase : Optional[Any] , __lowercase : int=13 , __lowercase : List[str]=7 , __lowercase : Dict=True , __lowercase : Tuple=False , __lowercase : Optional[Any]=99 , __lowercase : str=32 , __lowercase : List[str]=2 , __lowercase : str=4 , __lowercase : Optional[int]=37 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=40 , __lowercase : str=2 , __lowercase : List[Any]=1 , __lowercase : Optional[Any]=0 , ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[Any] =parent SCREAMING_SNAKE_CASE__ : List[Any] =batch_size SCREAMING_SNAKE_CASE__ : Optional[int] =seq_length SCREAMING_SNAKE_CASE__ : Optional[Any] =is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_labels SCREAMING_SNAKE_CASE__ : str =vocab_size SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE__ : List[str] =num_hidden_layers SCREAMING_SNAKE_CASE__ : int =num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] =intermediate_size SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id SCREAMING_SNAKE_CASE__ : Any =pad_token_id SCREAMING_SNAKE_CASE__ : Union[str, Any] =bos_token_id def __magic_name__ ( self : Any ) -> str: SCREAMING_SNAKE_CASE__ : List[str] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) SCREAMING_SNAKE_CASE__ : Any =tf.concat([input_ids, eos_tensor] , axis=1 ) SCREAMING_SNAKE_CASE__ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =prepare_pegasus_inputs_dict(__lowercase , __lowercase , __lowercase ) return config, inputs_dict def __magic_name__ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] =TFPegasusModel(config=__lowercase ).get_decoder() SCREAMING_SNAKE_CASE__ : List[str] =inputs_dict['''input_ids'''] 
SCREAMING_SNAKE_CASE__ : Tuple =input_ids[:1, :] SCREAMING_SNAKE_CASE__ : Tuple =inputs_dict['''attention_mask'''][:1, :] SCREAMING_SNAKE_CASE__ : Tuple =inputs_dict['''head_mask'''] SCREAMING_SNAKE_CASE__ : List[str] =1 # first forward pass SCREAMING_SNAKE_CASE__ : Any =model(__lowercase , attention_mask=__lowercase , head_mask=__lowercase , use_cache=__lowercase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : str =ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) SCREAMING_SNAKE_CASE__ : int =model(__lowercase , attention_mask=__lowercase )[0] SCREAMING_SNAKE_CASE__ : Any =model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE__ : Optional[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE__ : Any =output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE__ : List[str] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowercase , __lowercase , rtol=1e-3 ) def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : Optional[Any]=None, ): '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : str =tf.cast(tf.math.not_equal(UpperCamelCase__, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ : Any =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: SCREAMING_SNAKE_CASE__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE__ : List[Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case_ = ( { """conversational""": TFPegasusForConditionalGeneration, """feature-extraction""": TFPegasusModel, """summarization""": TFPegasusForConditionalGeneration, """text2text-generation""": TFPegasusForConditionalGeneration, """translation""": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case_ = True snake_case_ = False snake_case_ = False def __magic_name__ ( self : 
Union[str, Any] ) -> str: SCREAMING_SNAKE_CASE__ : List[Any] =TFPegasusModelTester(self ) SCREAMING_SNAKE_CASE__ : Dict =ConfigTester(self , config_class=__lowercase ) def __magic_name__ ( self : int ) -> Any: self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowercase ) @require_sentencepiece @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] snake_case_ = [ """California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to""" """ reduce the risk of wildfires.""", """N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""", ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case_ = """google/pegasus-xsum""" @cached_property def __magic_name__ ( self : Optional[int] ) -> Tuple: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __magic_name__ ( self : List[Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __magic_name__ ( self : List[str] , **__lowercase : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.translate_src_text(**__lowercase ) assert self.expected_text == generated_words def __magic_name__ ( self : Optional[Any] , **__lowercase : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer(self.src_text , **__lowercase , padding=__lowercase , return_tensors='''tf''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowercase , ) SCREAMING_SNAKE_CASE__ : Any =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowercase ) return generated_words @slow def __magic_name__ ( self : Optional[Any] ) -> Optional[int]: self._assert_generated_batch_equal_expected()
222
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase_ = { """vocab_file""": { """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""", """distilbert-base-uncased-distilled-squad""": ( """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt""" ), """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""", """distilbert-base-cased-distilled-squad""": ( """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt""" ), """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""", """distilbert-base-multilingual-cased""": ( """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""", """distilbert-base-uncased-distilled-squad""": ( """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json""" ), """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""", """distilbert-base-cased-distilled-squad""": ( """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json""" ), """distilbert-base-german-cased""": ( """https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json""" ), """distilbert-base-multilingual-cased""": ( """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json""" ), }, } UpperCamelCase_ = { """distilbert-base-uncased""": 5_12, """distilbert-base-uncased-distilled-squad""": 5_12, """distilbert-base-cased""": 5_12, """distilbert-base-cased-distilled-squad""": 5_12, """distilbert-base-german-cased""": 5_12, """distilbert-base-multilingual-cased""": 5_12, } UpperCamelCase_ = { """distilbert-base-uncased""": {"""do_lower_case""": True}, """distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True}, """distilbert-base-cased""": {"""do_lower_case""": False}, """distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False}, """distilbert-base-german-cased""": {"""do_lower_case""": False}, """distilbert-base-multilingual-cased""": {"""do_lower_case""": False}, } class a_ (__lowerCamelCase ): __lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase : Dict = ['input_ids', 'attention_mask'] __lowerCAmelCase : Union[str, Any] = DistilBertTokenizer def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ): super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , 
cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) _lowerCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , UpperCamelCase_ ) != do_lower_case or normalizer_state.get("""strip_accents""" , UpperCamelCase_ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , UpperCamelCase_ ) != tokenize_chinese_chars ): _lowerCAmelCase : int = getattr(UpperCamelCase_ , normalizer_state.pop("""type""" ) ) _lowerCAmelCase : Optional[int] = do_lower_case _lowerCAmelCase : Optional[Any] = strip_accents _lowerCAmelCase : List[Any] = tokenize_chinese_chars _lowerCAmelCase : List[Any] = normalizer_class(**UpperCamelCase_ ) _lowerCAmelCase : int = do_lower_case def __UpperCamelCase ( self , snake_case_ , snake_case_=None ): _lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ): _lowerCAmelCase : Union[str, Any] = [self.sep_token_id] _lowerCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ): _lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
309
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Return the minimum number of moves needed so that every node of the binary
    tree holds exactly one coin, where each move transfers one coin between
    adjacent nodes.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
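# A small worked example for distribute_coins, assuming the names above: the
# root holds all three coins, so one move to each empty leaf is required.
root = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(root))  # 2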
12
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
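# Sketch of what the lazy module buys (assumes transformers is installed):
# importing the package is cheap, and the torch-backed classes are only
# materialized on first attribute access.
import transformers.models.git as git

config = git.GitConfig()        # first access triggers the real configuration_git import
model_cls = git.GitForCausalLM  # this access pulls in modeling_git and requires torch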
354
import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __a( unittest.TestCase ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=3 ,) -> Dict: UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : List[str] = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Union[str, Any] = patch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Any = is_training UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = hidden_size UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : Union[str, Any] = intermediate_size UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob UpperCAmelCase_ : List[str] = type_sequence_label_size UpperCAmelCase_ : Union[str, Any] = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : Dict = (image_size // patch_size) ** 2 UpperCAmelCase_ : List[str] = num_patches + 1 def a__ ( self ) -> str: UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = BeitConfig( vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,) return config, pixel_values, labels def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = FlaxBeitModel(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = 
FlaxBeitForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ : Dict = self.type_sequence_label_size UpperCAmelCase_ : int = FlaxBeitForImageClassification(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : Any = 1 UpperCAmelCase_ : List[Any] = FlaxBeitForImageClassification(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ) : List[str] = config_and_inputs UpperCAmelCase_ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __a( _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def a__ ( self ) -> None: UpperCAmelCase_ : List[Any] = FlaxBeitModelTester(self ) UpperCAmelCase_ : List[str] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,has_text_modality=_SCREAMING_SNAKE_CASE ,hidden_size=37 ) def a__ ( self ) -> Optional[int]: self.config_tester.run_common_tests() def a__ ( self ) -> List[Any]: UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Optional[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_, UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE ) @jax.jit def model_jitted(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ): return model(pixel_values=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase_ : Dict = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase_ : List[str] = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape ,output.shape ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def a__ ( self ) -> List[Any]: for model_class_name in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' ) UpperCAmelCase_ : Optional[int] = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @require_flax class __a( unittest.TestCase ): """simple docstring""" @cached_property def a__ ( self ) -> Dict: return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Tuple = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ) UpperCAmelCase_ : List[Any] = self.default_image_processor UpperCAmelCase_ : Optional[Any] = prepare_img() UpperCAmelCase_ : Optional[Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).pixel_values # prepare bool_masked_pos UpperCAmelCase_ : Union[str, Any] = np.ones((1, 196) ,dtype=_SCREAMING_SNAKE_CASE ) # forward pass UpperCAmelCase_ : Optional[int] = model(pixel_values=_SCREAMING_SNAKE_CASE ,bool_masked_pos=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = outputs.logits # verify the logits UpperCAmelCase_ : List[str] = (1, 196, 8_192) self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = np.array( [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-2 ) ) @slow def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Any = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ) UpperCAmelCase_ : Any = self.default_image_processor UpperCAmelCase_ : Any = prepare_img() UpperCAmelCase_ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ) # forward pass UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = outputs.logits # verify the logits UpperCAmelCase_ : Dict = (1, 1_000) self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = np.array([-1.23_85, -1.09_87, -1.01_08] ) self.assertTrue(np.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) UpperCAmelCase_ : Dict = 281 self.assertEqual(logits.argmax(-1 ).item() ,_SCREAMING_SNAKE_CASE ) @slow def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : str = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : Any = prepare_img() UpperCAmelCase_ : Dict = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ) # forward pass UpperCAmelCase_ : Dict = model(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = outputs.logits # verify the logits UpperCAmelCase_ : Union[str, Any] = (1, 21_841) self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = np.array([1.68_81, -0.27_87, 0.59_01] ) 
self.assertTrue(np.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) UpperCAmelCase_ : Dict = 2_396 self.assertEqual(logits.argmax(-1 ).item() ,_SCREAMING_SNAKE_CASE )
235
0
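The BeiT tester a few lines up derives the transformer sequence length from the image geometry: the image is cut into non-overlapping patches and a [CLS] token is prepended. A minimal standalone sketch of that computation (not part of the test file; the numbers are the tester's defaults):

def beit_seq_length(image_size: int, patch_size: int) -> int:
    # number of non-overlapping patches per image, plus one [CLS] token
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1

assert beit_seq_length(30, 2) == 226  # 15 * 15 patches + [CLS]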
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : str = logging.get_logger(__name__) A : List[Any] = { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json" ), "distilbert-base-uncased-finetuned-sst-2-english": ( "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json" ), } class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : Any ="""distilbert""" __UpperCAmelCase : List[str] ={ """hidden_size""": """dim""", """num_attention_heads""": """n_heads""", """num_hidden_layers""": """n_layers""", } def __init__( self , __a=3_05_22 , __a=5_12 , __a=False , __a=6 , __a=12 , __a=7_68 , __a=4 * 7_68 , __a=0.1 , __a=0.1 , __a="gelu" , __a=0.0_2 , __a=0.1 , __a=0.2 , __a=0 , **__a , ): __lowerCAmelCase = vocab_size __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = sinusoidal_pos_embds __lowerCAmelCase = n_layers __lowerCAmelCase = n_heads __lowerCAmelCase = dim __lowerCAmelCase = hidden_dim __lowerCAmelCase = dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = activation __lowerCAmelCase = initializer_range __lowerCAmelCase = qa_dropout __lowerCAmelCase = seq_classif_dropout super().__init__(**__a , pad_token_id=__a ) class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' @property def snake_case ( self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: __lowerCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
57
"""simple docstring""" class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' pass class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' pass class _UpperCamelCase : '''simple docstring''' def __init__( self ): __lowerCAmelCase = [ [], [], [], ] def snake_case ( self , __a , __a ): try: if len(self.queues[priority] ) >= 1_00: raise OverflowError("Maximum queue size is 100" ) self.queues[priority].append(__a ) except IndexError: raise ValueError("Valid priorities are 0, 1, and 2" ) def snake_case ( self ): for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError("All queues are empty" ) def __str__( self ): return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) ) class _UpperCamelCase : '''simple docstring''' def __init__( self ): __lowerCAmelCase = [] def snake_case ( self , __a ): if len(self.queue ) == 1_00: raise OverFlowError("Maximum queue size is 100" ) self.queue.append(__a ) def snake_case ( self ): if not self.queue: raise UnderFlowError("The queue is empty" ) else: __lowerCAmelCase = min(self.queue ) self.queue.remove(__a ) return data def __str__( self ): return str(self.queue ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(_UpperCamelCase ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(_UpperCamelCase ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(_UpperCamelCase ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(_UpperCamelCase ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
57
1
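For contrast, the fixed-priority behavior above can also be sketched with the standard library's heapq. This is an illustrative alternative, not part of the snippet above; the sequence counter is the usual tie-breaker that keeps FIFO order within a priority level:

import heapq
import itertools

class HeapPriorityQueue:
    """FIFO within each priority level; lowest priority value is served first."""

    def __init__(self):
        self._heap = []
        self._counter = itertools.count()  # tie-breaker preserves insertion order

    def enqueue(self, priority: int, data) -> None:
        heapq.heappush(self._heap, (priority, next(self._counter), data))

    def dequeue(self):
        if not self._heap:
            raise IndexError("All queues are empty")
        return heapq.heappop(self._heap)[2]

q = HeapPriorityQueue()
q.enqueue(1, "b"); q.enqueue(0, "a"); q.enqueue(1, "c")
assert [q.dequeue() for _ in range(3)] == ["a", "b", "c"]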
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_framework_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
219
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
219
1
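The client above expects a peer that accepts its greeting and then streams file bytes until it closes the socket. A minimal companion server sketch under the same host/port convention; the file name "File_to_send" is hypothetical:

import socket

def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    print(conn.recv(1024))  # the client's "Hello server!" greeting
    with open(filename, "rb") as in_file:  # hypothetical file to transfer
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()  # closing signals EOF, which ends the client's recv loop
    server.close()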
'''simple docstring''' import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCAmelCase_ ( ): '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCAmelCase_ ( ): '''simple docstring''' A : str = '''mock-s3-bucket''' A : str = F's3://{mock_bucket}' A : Dict = extract_path_from_uri(snake_case__ ) assert dataset_path.startswith('''s3://''' ) is False A : Tuple = '''./local/path''' A : Dict = extract_path_from_uri(snake_case__ ) assert dataset_path == new_dataset_path def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : int = is_remote_filesystem(snake_case__ ) assert is_remote is True A : Union[str, Any] = fsspec.filesystem('''file''' ) A : Optional[int] = is_remote_filesystem(snake_case__ ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Any = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} A : Union[str, Any] = input_paths[compression_fs_class.protocol] if input_path is None: A : Any = F'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) A : Tuple = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case__ ) assert isinstance(snake_case__ , snake_case__ ) A : Dict = os.path.basename(snake_case__ ) A : str = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f, open(snake_case__ , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} A : Optional[Any] = compressed_file_paths[protocol] A : List[Any] = '''dataset.jsonl''' A : List[str] = F'{protocol}://{member_file_path}::{compressed_file_path}' A, *A : Optional[int] = fsspec.get_fs_token_paths(snake_case__ ) assert fs.isfile(snake_case__ ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : int = hf_api.dataset_info(snake_case__ , token=snake_case__ ) A : Any = HfFileSystem(repo_info=snake_case__ , token=snake_case__ ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(snake_case__ ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = '''bz2''' # Import module import 
datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case__ , snake_case__ , clobber=snake_case__ ) with pytest.warns(snake_case__ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case__ ) == 1 assert ( str(warning_info[0].message ) == F'A filesystem protocol was already set for {protocol} and will be overwritten.' )
3
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCamelCase : List[Any] = logging.getLogger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ): '''simple docstring''' lowercase__ = label_idx def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: lowercase__ = [] lowercase__ = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 lowercase__ = [] lowercase__ = [] else: lowercase__ = line.split(''' ''' ) words.append(splits[0] ) if len(UpperCamelCase ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) return examples def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(UpperCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(UpperCamelCase ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] ): '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: for sentence in parse_incr(UpperCamelCase ): lowercase__ = [] lowercase__ = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert 
len(UpperCamelCase ) == len(UpperCamelCase ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 return examples def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for sentence in parse_incr(UpperCamelCase ): lowercase__ = preds_list[example_id] lowercase__ = '''''' for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(UpperCamelCase ) example_id += 1 def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
2
0
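The tests above lean on fsspec's protocol registry, where fsspec.filesystem(protocol, **options) returns a filesystem instance with a uniform open/exists/glob API. A tiny round-trip through fsspec's built-in in-memory backend shows the pattern, independent of the datasets-specific compression filesystems:

import fsspec

fs = fsspec.filesystem("memory")          # look up a registered protocol by name
with fs.open("demo/data.txt", "w") as f:  # same open/exists API as a local filesystem
    f.write("hello fsspec")
assert fs.exists("demo/data.txt")
with fs.open("demo/data.txt", "r") as f:
    assert f.read() == "hello fsspec"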
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
357
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase__ = XCLIPTextConfig() # derive patch size from model name UpperCAmelCase__ = model_name.find('patch' ) UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ ) if "large" in model_name: UpperCAmelCase__ = 7_68 UpperCAmelCase__ = 30_72 UpperCAmelCase__ = 12 UpperCAmelCase__ = 10_24 UpperCAmelCase__ = 40_96 UpperCAmelCase__ = 16 UpperCAmelCase__ = 24 UpperCAmelCase__ = 7_68 UpperCAmelCase__ = 30_72 if model_name == "xclip-large-patch14-16-frames": UpperCAmelCase__ = 3_36 UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ ) if "large" in model_name: UpperCAmelCase__ = 7_68 return config def UpperCamelCase_( snake_case__: Any ) -> Tuple: # text encoder if name == "token_embedding.weight": UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: UpperCAmelCase__ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: UpperCAmelCase__ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": UpperCAmelCase__ = name.replace('positional' , 'position' ) if 
name.startswith('mit.resblocks' ): UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): UpperCAmelCase__ = orig_state_dict.pop(snake_case__ ) if "attn.in_proj" in key: UpperCAmelCase__ = key.split('.' ) if key.startswith('visual' ): UpperCAmelCase__ = key_split[3] UpperCAmelCase__ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: UpperCAmelCase__ = val[ :dim, : ] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[ -dim:, : ] else: UpperCAmelCase__ = val[ :dim ] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[ -dim: ] else: if "weight" in key: UpperCAmelCase__ = val[ :dim, : ] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[ -dim:, : ] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[-dim:] elif key.startswith('mit' ): UpperCAmelCase__ = key_split[2] UpperCAmelCase__ = config.vision_config.mit_hidden_size if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[dim : dim * 2, :] UpperCAmelCase__ = val[-dim:, :] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[dim : dim * 2] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = key_split[2] UpperCAmelCase__ = config.text_config.hidden_size if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[-dim:, :] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = rename_key(snake_case__ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: UpperCAmelCase__ = val.T UpperCAmelCase__ = val return orig_state_dict def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]: if num_frames == 8: UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: UpperCAmelCase__ = 'eating_spaghetti.npy' elif num_frames == 32: UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy' UpperCAmelCase__ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , ) UpperCAmelCase__ = np.load(snake_case__ ) return list(snake_case__ ) def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]: UpperCAmelCase__ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 
'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } UpperCAmelCase__ = model_to_url[model_name] UpperCAmelCase__ = 8 if "16-frames" in model_name: UpperCAmelCase__ = 16 elif "shot" in model_name: UpperCAmelCase__ = 32 UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ ) UpperCAmelCase__ = XCLIPModel(snake_case__ ) model.eval() if "drive" in checkpoint_url: UpperCAmelCase__ = 'pytorch_model.bin' gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ ) UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model'] else: UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model'] UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ ) UpperCAmelCase__ = XCLIPModel(snake_case__ ) UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24 UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ ) UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) UpperCAmelCase__ = prepare_video(snake_case__ ) UpperCAmelCase__ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): UpperCAmelCase__ = model(**snake_case__ ) # Verify outputs UpperCAmelCase__ = outputs.logits_per_video UpperCAmelCase__ = logits_per_video.softmax(dim=1 ) print('Probs:' , snake_case__ ) # kinetics-400 if model_name == "xclip-base-patch32": UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] ) elif model_name == "xclip-base-patch32-16-frames": UpperCAmelCase__ = 
torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] ) elif model_name == "xclip-base-patch16": UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] ) elif model_name == "xclip-base-patch16-16-frames": UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] ) elif model_name == "xclip-large-patch14": UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] ) elif model_name == "xclip-large-patch14-16-frames": UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] ) else: raise ValueError(f"Model name {model_name} not supported" ) assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(snake_case__ , organization='nielsr' ) processor.push_to_hub(snake_case__ , organization='nielsr' ) slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''xclip-base-patch32''', type=str, help='''Name of the model.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _UpperCamelCase = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
335
0
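The checkpoint converter above repeatedly slices a fused attn.in_proj matrix into separate query/key/value projections. The idiom in isolation, with a toy hidden size rather than a real checkpoint:

import torch

dim = 4                                      # toy hidden size
in_proj_weight = torch.randn(3 * dim, dim)   # fused [q; k; v] projection, as stored in CLIP-style checkpoints

q_weight = in_proj_weight[:dim, :]           # first third  -> query
k_weight = in_proj_weight[dim : dim * 2, :]  # middle third -> key
v_weight = in_proj_weight[-dim:, :]          # last third   -> value

assert torch.equal(torch.cat([q_weight, k_weight, v_weight]), in_proj_weight)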
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}


if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
41
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
88
0
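The filter built above is just an alternation anchored to the repository's top-level directories. A small self-contained check of the same pattern on made-up paths:

import re

dirs = ["src", "tests"]  # stand-ins for sys.argv[1:]
regex = re.compile(rf"^({'|'.join(dirs)}).*?\.py$")

paths = ["src/a.py", "docs/b.py", "tests/sub/c.py", "src/notes.txt"]
assert [p for p in paths if regex.match(p)] == ["src/a.py", "tests/sub/c.py"]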
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
356
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } UpperCamelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : int = value elif weight_type == "weight_g": UpperCAmelCase__ : Dict = value elif weight_type == "weight_v": UpperCAmelCase__ : List[str] = value elif weight_type == "bias": UpperCAmelCase__ : Tuple = value else: UpperCAmelCase__ : Tuple = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Dict = fairseq_model.state_dict() UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ : Any = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : str = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : List[str] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Dict = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ : Tuple = '''weight''' else: UpperCAmelCase__ : Optional[Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Union[str, Any] = int(items[0] ) UpperCAmelCase__ : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, 
but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any: if config_path is not None: UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : int = UniSpeechSatConfig() UpperCAmelCase__ : Tuple = '''''' if is_finetuned: UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ ) else: UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) UpperCAmelCase__ : Union[str, Any] = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCamelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
299
0
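set_recursively in the converter above walks a dotted checkpoint key down a module hierarchy with repeated getattr before assigning the weight. The traversal on its own, over a dummy object tree (illustrative names only):

from types import SimpleNamespace

def get_by_dotted_key(root, key: str):
    obj = root
    for attribute in key.split("."):  # descend one attribute per dotted segment
        obj = getattr(obj, attribute)
    return obj

model = SimpleNamespace(encoder=SimpleNamespace(layer_norm=SimpleNamespace(weight=1.0)))
assert get_by_dotted_key(model, "encoder.layer_norm.weight") == 1.0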
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME A__ : Union[str, Any] = ['small', 'medium', 'large'] A__ : Tuple = 'lm_head.decoder.weight' A__ : int = 'lm_head.weight' def _snake_case ( lowerCamelCase__ : int , lowerCamelCase__ : List[str] ) -> List[str]: lowerCamelCase_ : int =torch.load(lowerCamelCase__ ) lowerCamelCase_ : str =d.pop(lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) torch.save(lowerCamelCase__ , os.path.join(lowerCamelCase__ , lowerCamelCase__ ) ) if __name__ == "__main__": A__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) A__ : List[str] = parser.parse_args() for MODEL in DIALOGPT_MODELS: A__ : Dict = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl') A__ : Union[str, Any] = f'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
144
"""simple docstring""" from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def __magic_name__ ( lowercase ): return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: List[str] =ArgumentParser( """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=lowercase ) SCREAMING_SNAKE_CASE_: List[Any] =parser.add_subparsers(help="""datasets-cli command helpers""" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(lowercase ) EnvironmentCommand.register_subcommand(lowercase ) TestCommand.register_subcommand(lowercase ) RunBeamCommand.register_subcommand(lowercase ) DummyDataCommand.register_subcommand(lowercase ) # Parse args SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_known_args() if not hasattr(lowercase , """func""" ): parser.print_help() exit(1 ) SCREAMING_SNAKE_CASE_: Dict =parse_unknown_args(lowercase ) # Run SCREAMING_SNAKE_CASE_: Tuple =args.func(lowercase , **lowercase ) service.run() if __name__ == "__main__": main()
173
0
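The CLI above forwards unrecognized flags to the selected command by pairing them up: unknown_args[::2] are the flag names and unknown_args[1::2] their values. In isolation (note that the values stay strings; each command is responsible for casting them):

unknown_args = ["--num_proc", "4", "--cache_dir", "/tmp/cache"]
kwargs = {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
assert kwargs == {"num_proc": "4", "cache_dir": "/tmp/cache"}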
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=7 ,UpperCAmelCase_=3 ,UpperCAmelCase_=18 ,UpperCAmelCase_=30 ,UpperCAmelCase_=4_00 ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=[0.48145466, 0.4578275, 0.40821073] ,UpperCAmelCase_=[0.26862954, 0.26130258, 0.27577711] ,UpperCAmelCase_=True ,): _lowercase : Optional[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24} _lowercase : Any = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowercase : int = parent _lowercase : Tuple = batch_size _lowercase : int = num_channels _lowercase : Dict = image_size _lowercase : Any = min_resolution _lowercase : List[str] = max_resolution _lowercase : Any = do_resize _lowercase : Optional[int] = size _lowercase : Dict = do_center_crop _lowercase : Optional[Any] = crop_size _lowercase : Tuple = do_normalize _lowercase : List[str] = image_mean _lowercase : str = image_std _lowercase : Any = do_convert_rgb def lowerCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowerCamelCase__ ( self ,UpperCAmelCase_=False ,UpperCAmelCase_=False ,UpperCAmelCase_=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: _lowercase : Union[str, Any] = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_55 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) ) else: _lowercase : Optional[int] = [] for i in range(self.batch_size ): _lowercase , _lowercase : Optional[Any] = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 ) image_inputs.append(np.random.randint(2_55 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension _lowercase : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] if torchify: _lowercase : Union[str, Any] = [torch.from_numpy(UpperCAmelCase_ ) for x in image_inputs] return image_inputs @require_torch @require_vision class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self ): _lowercase : int = ChineseCLIPImageProcessingTester(self ,do_center_crop=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self ): _lowercase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""size""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ 
,"""do_center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_mean""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_std""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_convert_rgb""" ) ) def lowerCamelCase__ ( self ): _lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 2_24, """width""": 2_24} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) _lowercase : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowercase : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,Image.Image ) # Test not batched input _lowercase : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : Tuple = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowercase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ ,numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,np.ndarray ) # Test not batched input _lowercase : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : str = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowercase : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ ,torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,torch.Tensor ) # Test not batched input _lowercase : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : Any = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) @require_torch @require_vision class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self ): _lowercase : Any = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=UpperCAmelCase_ ) _lowercase : List[Any] = 3 @property def lowerCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self ): _lowercase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""size""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_mean""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_std""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_convert_rgb""" ) ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowercase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,Image.Image ) # Test not batched input _lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : List[Any] = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCAmelCase: Any = generate_large_matrix() UpperCAmelCase: Dict = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid ) assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 _lowercase : List[Any] = len(__UpperCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: _lowercase : Tuple = (left + right) // 2 _lowercase : List[Any] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: _lowercase : Dict = mid + 1 else: _lowercase : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Any = 0 _lowercase : Optional[int] = len(grid[0] ) for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] ) total += bound return (len(__UpperCAmelCase ) * len(grid[0] )) - total def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return len([number for row in grid for number in row if number < 0] ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 for row in grid: for i, number in enumerate(__UpperCAmelCase ): if number < 0: total += len(__UpperCAmelCase ) - i break return total def __SCREAMING_SNAKE_CASE ( ): from timeit import timeit print("""Running benchmarks""" ) _lowercase : Tuple = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): _lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
336
1
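A minimal standalone sketch of the same counting idea as the grid sample above, using only the standard library; the function name and the example grid are illustrative assumptions. Each row is sorted in decreasing order, so its negatives occupy a prefix of the reversed row and can be located with one binary search per row.

from bisect import bisect_left

def count_negatives(grid):
    # Each reversed row is increasing, so its negatives form a prefix;
    # bisect_left(rev, 0) is the count of elements strictly below zero.
    return sum(bisect_left(row[::-1], 0) for row in grid)

assert count_negatives([[4, 3, -1], [3, 1, -2], [-1, -2, -3]]) == 5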
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL _UpperCAmelCase : str = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=False , ) -> Dict: '''simple docstring''' output_path.parent.mkdir(parents=lowercase , exist_ok=lowercase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( lowercase , lowercase , f=output_path.as_posix() , input_names=lowercase , output_names=lowercase , dynamic_axes=lowercase , do_constant_folding=lowercase , use_external_data_format=lowercase , enable_onnx_checker=lowercase , opset_version=lowercase , ) else: export( lowercase , lowercase , f=output_path.as_posix() , input_names=lowercase , output_names=lowercase , dynamic_axes=lowercase , do_constant_folding=lowercase , opset_version=lowercase , ) @torch.no_grad() def A ( lowercase , lowercase , lowercase , lowercase = False ) -> Tuple: '''simple docstring''' UpperCamelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): UpperCamelCase = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: UpperCamelCase = 'cpu' UpperCamelCase = Path(lowercase ) # VAE DECODER UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' ) UpperCamelCase = vae_decoder.config.latent_channels # forward only through the decoder part UpperCamelCase = vae_decoder.decode onnx_export( lowercase , model_args=( torch.randn(1 , lowercase , 25 , 25 ).to(device=lowercase , dtype=lowercase ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=lowercase , ) del vae_decoder if __name__ == "__main__": _UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") _UpperCAmelCase : Optional[int] = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
222
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): UpperCamelCase = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = 'sgugger/tiny-distilbert-classification' UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , only_pretrain_model=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , torchscript=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , fpaa=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' UpperCamelCase = AutoConfig.from_pretrained(A_ ) # set architectures equal to `None` UpperCamelCase = None UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ ) UpperCamelCase = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A_ , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' UpperCamelCase = AutoConfig.from_pretrained(A_ ) UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = 'sshleifer/tinier_bart' UpperCamelCase = AutoConfig.from_pretrained(A_ ) UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' UpperCamelCase = AutoConfig.from_pretrained(A_ ) UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = 'sshleifer/tinier_bart' UpperCamelCase = AutoConfig.from_pretrained(A_ ) UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] ) UpperCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , save_to_csv=A_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(A_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(A_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(A_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(A_ , 'env.csv' ) , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ ) benchmark.run() self.assertTrue(Path(os.path.join(A_ , 'inf_time.csv' ) ).exists() ) 
self.assertTrue(Path(os.path.join(A_ , 'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ , 'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ , 'env.csv' ) ).exists() ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(A_ ): self.assertTrue(hasattr(A_ , 'sequential' ) ) self.assertTrue(hasattr(A_ , 'cumulative' ) ) self.assertTrue(hasattr(A_ , 'current' ) ) self.assertTrue(hasattr(A_ , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A_ , 'log.txt' ) , log_print=A_ , trace_memory_line_by_line=A_ , multi_process=A_ , ) UpperCamelCase = PyTorchBenchmark(A_ ) UpperCamelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(A_ , 'log.txt' ) ).exists() )
222
1
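A hedged sketch of the torch.onnx.export call pattern used in the VAE-decoder conversion above, applied to a toy module; the module, shapes, output file name, and opset are illustrative assumptions, not taken from the checkpoint conversion. Running it writes toy.onnx to the working directory.

import torch

class Toy(torch.nn.Module):
    def forward(self, x):
        return x * 2  # trivial stand-in for the real decoder

torch.onnx.export(
    Toy(),                                 # module to trace
    (torch.randn(1, 3, 8, 8),),            # example input tuple
    "toy.onnx",                            # output path (hypothetical)
    input_names=["sample"],
    output_names=["out"],
    dynamic_axes={"sample": {0: "batch"}},
    opset_version=14,
)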
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE = { """configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""], """tokenization_deberta""": ["""DebertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = ["""DebertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """DebertaForMaskedLM""", """DebertaForQuestionAnswering""", """DebertaForSequenceClassification""", """DebertaForTokenClassification""", """DebertaModel""", """DebertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDebertaForMaskedLM""", """TFDebertaForQuestionAnswering""", """TFDebertaForSequenceClassification""", """TFDebertaForTokenClassification""", """TFDebertaModel""", """TFDebertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
356
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
256
0
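Both __init__.py samples above rely on _LazyModule to defer heavy imports until a symbol is actually used. A standalone sketch of that idea with only the standard library; LazyModule here is an illustrative stand-in, not the transformers implementation.

import importlib

class LazyModule:
    """Defers the real import until the first attribute access."""
    def __init__(self, name):
        self._name, self._module = name, None
    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

lazy_math = LazyModule("math")  # "math" stands in for a heavy dependency
print(lazy_math.sqrt(2.0))      # first access triggers the import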
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Mark ``fn`` as experimental: calling it emits a warning first."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
13
from math import ceil


def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number
    spiral (Project Euler problem 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
235
0
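A quick standalone check of the spiral-diagonal formula in solution above: each ring of side length s contributes its four corners, whose sum is 4*s**2 - 6*(s - 1).

def diagonal_sum(n: int) -> int:
    total = 1  # the centre of the spiral
    for side in range(3, n + 1, 2):
        total += 4 * side * side - 6 * (side - 1)  # four corners of the ring
    return total

assert diagonal_sum(5) == 101  # the classic 5x5 spiral gives 101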
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class _A ( unittest.TestCase ): def A__ ( self ): """simple docstring""" lowercase = tempfile.mkdtemp() lowercase = BlipImageProcessor() lowercase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) lowercase = BlipaProcessor(__lowerCAmelCase , __lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def A__ ( self , **__lowerCAmelCase ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer def A__ ( self , **__lowerCAmelCase ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor def A__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def A__ ( self ): """simple docstring""" lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def A__ ( self ): """simple docstring""" lowercase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) lowercase = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase = self.prepare_image_inputs() lowercase = image_processor(__lowerCAmelCase , return_tensors="""np""" ) lowercase = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase = """lower newer""" lowercase = processor(text=__lowerCAmelCase ) lowercase = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase = """lower newer""" lowercase = self.prepare_image_inputs() lowercase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , 
["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase = processor.batch_decode(__lowerCAmelCase ) lowercase = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase = """lower newer""" lowercase = self.prepare_image_inputs() lowercase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
32
"""simple docstring""" def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool: '''simple docstring''' lowercase = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(2_7)) print(perfect_cube(4))
32
1
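A hedged alternative to the float cube root in perfect_cube above: rounding and verifying in integer arithmetic sidesteps floating-point error for large n. The name perfect_cube_int is an illustrative assumption.

def perfect_cube_int(n: int) -> bool:
    if n < 0:
        return False
    root = round(n ** (1 / 3))
    # the float root can land one off either way, so probe the neighbours
    return any((root + d) ** 3 == n for d in (-1, 0, 1))

assert perfect_cube_int(27) and not perfect_cube_int(4)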
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _lowercase : Any = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize _lowercase : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" _lowercase : List[str] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" _lowercase : Union[str, Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def snake_case ( self : List[Any] )-> Dict: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''], reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ], ) def snake_case ( self : Any, lowerCamelCase : List[str] )-> List[Any]: import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def snake_case ( self : int, lowerCamelCase : List[Any], lowerCamelCase : List[str], lowerCamelCase : Tuple=0.9, lowerCamelCase : int=3, lowerCamelCase : List[Any]=0.5 )-> Any: if NLTK_VERSION >= version.Version('''3.6.5''' ): lowerCamelCase__ : List[str] =[ meteor_score.single_meteor_score( word_tokenize(lowerCamelCase ), word_tokenize(lowerCamelCase ), alpha=lowerCamelCase, beta=lowerCamelCase, gamma=lowerCamelCase ) for ref, pred in zip(lowerCamelCase, lowerCamelCase ) ] else: lowerCamelCase__ : Optional[Any] =[ meteor_score.single_meteor_score(lowerCamelCase, lowerCamelCase, alpha=lowerCamelCase, beta=lowerCamelCase, gamma=lowerCamelCase ) for ref, pred in zip(lowerCamelCase, lowerCamelCase ) ] return {"meteor": np.mean(lowerCamelCase )}
238
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _lowercase : Tuple = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = ['pixel_values'] def __init__( self : Optional[Any], lowerCamelCase : bool = True, lowerCamelCase : Union[int, float] = 1 / 255, lowerCamelCase : bool = True, lowerCamelCase : int = 8, **lowerCamelCase : Tuple, )-> None: super().__init__(**lowerCamelCase ) lowerCamelCase__ : int =do_rescale lowerCamelCase__ : Dict =rescale_factor lowerCamelCase__ : Union[str, Any] =do_pad lowerCamelCase__ : Union[str, Any] =pad_size def snake_case ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : float, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : int )-> np.ndarray: return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase ) def snake_case ( self : Optional[Any], lowerCamelCase : np.ndarray, lowerCamelCase : int, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None )-> List[Any]: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =get_image_size(lowerCamelCase ) lowerCamelCase__ : List[str] =(old_height // size + 1) * size - old_height lowerCamelCase__ : List[str] =(old_width // size + 1) * size - old_width return pad(lowerCamelCase, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=lowerCamelCase ) def snake_case ( self : List[Any], lowerCamelCase : ImageInput, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[float] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST, **lowerCamelCase : Union[str, Any], )-> Dict: lowerCamelCase__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ : str =do_pad if do_pad is not None else self.do_pad lowerCamelCase__ : int =pad_size if pad_size is not None else self.pad_size lowerCamelCase__ : Optional[int] =make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. lowerCamelCase__ : Tuple =[to_numpy_array(lowerCamelCase ) for image in images] if do_rescale: lowerCamelCase__ : Tuple =[self.rescale(image=lowerCamelCase, scale=lowerCamelCase ) for image in images] if do_pad: lowerCamelCase__ : Tuple =[self.pad(lowerCamelCase, size=lowerCamelCase ) for image in images] lowerCamelCase__ : int =[to_channel_dimension_format(lowerCamelCase, lowerCamelCase ) for image in images] lowerCamelCase__ : Dict ={'''pixel_values''': images} return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase )
238
1
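A standalone sketch of the pad-to-multiple arithmetic used by the image processor above; note that (old // size + 1) * size always pads, even when the side is already a multiple of size.

def pad_amounts(height: int, width: int, size: int = 8):
    pad_h = (height // size + 1) * size - height
    pad_w = (width // size + 1) * size - width
    return pad_h, pad_w

assert pad_amounts(30, 400) == (2, 8)  # 30 -> 32, and 400 -> 408 (never 0)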
"""simple docstring""" import numpy # List of input, output pairs lowercase__ = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) lowercase__ = (((515, 22, 13), 555), ((61, 35, 49), 150)) lowercase__ = [2, 4, 1, 5] lowercase__ = len(train_data) lowercase__ = 0.009 def _snake_case ( lowercase__ , lowercase__="train" ): return calculate_hypothesis_value(lowercase__ , lowercase__ ) - output( lowercase__ , lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : int = 0 for i in range(len(lowercase__ ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def _snake_case ( lowercase__ , lowercase__ ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def _snake_case ( lowercase__ , lowercase__ ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def _snake_case ( lowercase__ , lowercase__=m ): _lowerCamelCase : Optional[int] = 0 for i in range(lowercase__ ): if index == -1: summation_value += _error(lowercase__ ) else: summation_value += _error(lowercase__ ) * train_data[i][0][index] return summation_value def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = summation_of_cost_derivative(lowercase__ , lowercase__ ) / m return cost_derivative_value def _snake_case ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output _lowerCamelCase : Tuple = 0.0_0_0_0_0_2 _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Optional[int] = 0 while True: j += 1 _lowerCamelCase : List[Any] = [0, 0, 0, 0] for i in range(0 , len(lowercase__ ) ): _lowerCamelCase : List[Any] = get_cost_derivative(i - 1 ) _lowerCamelCase : List[str] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( lowercase__ , lowercase__ , atol=lowercase__ , rtol=lowercase__ , ): break _lowerCamelCase : List[Any] = temp_parameter_vector print(('Number of iterations:', j) ) def _snake_case ( ): for i in range(len(lowercase__ ) ): print(('Actual output value:', output(lowercase__ , 'test' )) ) print(('Hypothesis output:', calculate_hypothesis_value(lowercase__ , 'test' )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
361
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): if attention_mask is None: _lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = OPTConfig lowerCamelCase__ = {} lowerCamelCase__ = """gelu""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ): _lowerCamelCase : Tuple = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : str = is_training _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : Tuple = pad_token_id _lowerCamelCase : List[str] = bos_token_id _lowerCamelCase : Optional[int] = embed_dim _lowerCamelCase : List[str] = word_embed_proj_dim _lowerCamelCase : Any = False def A_ ( self ): _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCamelCase : Tuple = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , ) _lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase ) return config, inputs_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase ) _lowerCamelCase : Optional[Any] = inputs_dict['input_ids'] _lowerCamelCase : str = input_ids[:1, :] _lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :] _lowerCamelCase : Optional[Any] = 1 # first forward pass _lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) _lowerCamelCase, _lowerCamelCase : List[str] 
= outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0] _lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 10 def A_ ( self ): _lowerCamelCase : int = TFOPTModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase , lowercase ): if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings _lowerCamelCase : Optional[int] = model_class(config=lowercase ) _lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase ) _lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
_lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase ) # check that weights remain the same after resizing _lowerCamelCase : int = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Optional[Any] = False self.assertTrue(lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase ) _lowerCamelCase : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Union[str, Any] = False self.assertTrue(lowercase ) def _snake_case ( lowercase__ ): return tf.constant(lowercase__ , dtype=tf.intaa ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = 99 def A_ ( self ): _lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2 _lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) _lowerCamelCase : int = input_ids.shape[0] _lowerCamelCase : List[Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' ) _lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id ) with tf.GradientTape(): _lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state _lowerCamelCase : Optional[Any] = (1, 11, 512) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : List[str] = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) ) _lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): super().setUp() _lowerCamelCase : List[Any] = 'facebook/opt-350m' def A_ ( self ): _lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model ) _lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model ) _lowerCamelCase : List[str] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase ) _lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) _lowerCamelCase : Any = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, 
-10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) _lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def A_ ( self ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def A_ ( self ): _lowerCamelCase : str = 'facebook/opt-125m' _lowerCamelCase : Dict = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : int = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = 'facebook/opt-350m' _lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase ) _lowerCamelCase : Any = 'left' # use different length sentences to test batching _lowerCamelCase : Optional[int] = [ 'Hello, my dog is a little', 'Today, I', ] _lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase ) _lowerCamelCase : int = inputs['input_ids'] _lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] ) _lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase ) _lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) _lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) _lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) def A_ ( self ): _lowerCamelCase : Tuple = 'facebook/opt-350m' _lowerCamelCase : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the 
city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase )
12
0
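A minimal standalone illustration of the batch gradient-descent loop above, fitting y = 2x with a single weight; the data and learning rate are illustrative.

data = [(1.0, 2.0), (2.0, 4.0), (3.0, 6.0)]
w, lr = 0.0, 0.05
for _ in range(200):
    grad = sum((w * x - y) * x for x, y in data) / len(data)  # d(MSE/2)/dw
    w -= lr * grad
assert abs(w - 2.0) < 1e-3  # converges to the true slope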
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Dict )-> Any: '''simple docstring''' A__ = tempfile.mkdtemp() # fmt: off A__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on A__ = dict(zip(snake_case__,range(len(snake_case__ ) ) ) ) A__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] A__ = {"unk_token": "<unk>"} A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file,'w',encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case__ ) + '\n' ) with open(self.merges_file,'w',encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case__ ) ) A__ = { "do_resize": True, "size": 2_0, "do_center_crop": True, "crop_size": 1_8, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } A__ = os.path.join(self.tmpdirname,snake_case__ ) with open(self.image_processor_file,'w',encoding='utf-8' ) as fp: json.dump(snake_case__,snake_case__ ) def snake_case__ ( self : Union[str, Any],**lowercase_ : Tuple )-> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname,**snake_case__ ) def snake_case__ ( self : int,**lowercase_ : int )-> Dict: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname,**snake_case__ ) def snake_case__ ( self : Any,**lowercase_ : Tuple )-> int: '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname,**snake_case__ ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def snake_case__ ( self : int )-> Optional[int]: '''simple docstring''' A__ = [np.random.randint(2_5_5,size=(3, 3_0, 4_0_0),dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(snake_case__,0,-1 ) ) for x in image_inputs] return image_inputs def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = self.get_image_processor() A__ = CLIPProcessor(tokenizer=snake_case__,image_processor=snake_case__ ) processor_slow.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname,use_fast=snake_case__ ) A__ = CLIPProcessor(tokenizer=snake_case__,image_processor=snake_case__ ) processor_fast.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab(),tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab(),tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab(),tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer,snake_case__ ) self.assertIsInstance(processor_fast.tokenizer,snake_case__ ) self.assertEqual(processor_slow.image_processor.to_json_string(),image_processor.to_json_string() ) 
self.assertEqual(processor_fast.image_processor.to_json_string(),image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor,snake_case__ ) self.assertIsInstance(processor_fast.image_processor,snake_case__ ) def snake_case__ ( self : Any )-> Any: '''simple docstring''' A__ = CLIPProcessor(tokenizer=self.get_tokenizer(),image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token='(BOS)',eos_token='(EOS)' ) A__ = self.get_image_processor(do_normalize=snake_case__,padding_value=1.0 ) A__ = CLIPProcessor.from_pretrained( self.tmpdirname,bos_token='(BOS)',eos_token='(EOS)',do_normalize=snake_case__,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(),tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer,snake_case__ ) self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor,snake_case__ ) def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=snake_case__,image_processor=snake_case__ ) A__ = self.prepare_image_inputs() A__ = image_processor(snake_case__,return_tensors='np' ) A__ = processor(images=snake_case__,return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum(),input_processor[key].sum(),delta=1E-2 ) def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=snake_case__,image_processor=snake_case__ ) A__ = "lower newer" A__ = processor(text=snake_case__ ) A__ = tokenizer(snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key],encoded_processor[key] ) def snake_case__ ( self : List[str] )-> str: '''simple docstring''' A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=snake_case__,image_processor=snake_case__ ) A__ = "lower newer" A__ = self.prepare_image_inputs() A__ = processor(text=snake_case__,images=snake_case__ ) self.assertListEqual(list(inputs.keys() ),['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=snake_case__,image_processor=snake_case__ ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(snake_case__ ) A__ = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__,snake_case__ ) def snake_case__ ( self : Union[str, Any] )-> int: '''simple docstring''' A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=snake_case__,image_processor=snake_case__ ) A__ = "lower newer" A__ = self.prepare_image_inputs() A__ = processor(text=snake_case__,images=snake_case__ ) self.assertListEqual(list(inputs.keys() ),processor.model_input_names )
7
"""simple docstring""" def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if principal <= 0: raise Exception("Principal borrowed must be > 0" ) if rate_per_annum < 0: raise Exception("Rate of interest must be >= 0" ) if years_to_repay <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise Exception("Years to repay must be an integer > 0" ) # Yearly rate is divided by 12 to get monthly rate lowerCAmelCase : Tuple = rate_per_annum / 1_2 # Years to repay is multiplied by 12 to get number of payments as payment is monthly lowerCAmelCase : List[Any] = years_to_repay * 1_2 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
108
0
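A standalone sanity check of the amortization formula in the EMI sample above; the loan figures are illustrative.

p, annual_rate, years = 100_000, 0.10, 2
r, n = annual_rate / 12, years * 12
emi = p * r * (1 + r) ** n / ((1 + r) ** n - 1)
assert 4600 < emi < 4630  # roughly 4614.5 per month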
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ : Optional[int] =logging.get_logger(__name__) A_ : Tuple ={ 'andreasmadsen/efficient_mlm_m0.40': ( 'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json' ), } class __a ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ : Optional[Any] = "roberta-prelayernorm" def __init__( self , a__=5_02_65 , a__=7_68 , a__=12 , a__=12 , a__=30_72 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_12 , a__=2 , a__=0.02 , a__=1e-12 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , a__=None , **a__ , ): super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _lowerCamelCase = vocab_size _lowerCamelCase = hidden_size _lowerCamelCase = num_hidden_layers _lowerCamelCase = num_attention_heads _lowerCamelCase = hidden_act _lowerCamelCase = intermediate_size _lowerCamelCase = hidden_dropout_prob _lowerCamelCase = attention_probs_dropout_prob _lowerCamelCase = max_position_embeddings _lowerCamelCase = type_vocab_size _lowerCamelCase = initializer_range _lowerCamelCase = layer_norm_eps _lowerCamelCase = position_embedding_type _lowerCamelCase = use_cache _lowerCamelCase = classifier_dropout class __a ( __SCREAMING_SNAKE_CASE ): @property def snake_case_ ( self ): if self.task == "multiple-choice": _lowerCamelCase = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
364
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer A_ : List[str] =logging.get_logger(__name__) A_ : Optional[Any] ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} A_ : Tuple ={ """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } A_ : Any ={ """junnyu/roformer_chinese_small""": 1_5_3_6, """junnyu/roformer_chinese_base""": 1_5_3_6, """junnyu/roformer_chinese_char_small""": 5_1_2, """junnyu/roformer_chinese_char_base""": 5_1_2, """junnyu/roformer_small_discriminator""": 1_2_8, """junnyu/roformer_small_generator""": 1_2_8, } A_ : List[str] ={ """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class __a ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : List[Any] = RoFormerTokenizer def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ): super().__init__( a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , ) _lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('lowercase' , a__ ) != do_lower_case or pre_tok_state.get('strip_accents' , a__ ) != strip_accents ): _lowerCamelCase = getattr(a__ , pre_tok_state.pop('type' ) ) _lowerCamelCase = do_lower_case _lowerCamelCase = strip_accents _lowerCamelCase = pre_tok_class(**a__ ) _lowerCamelCase = do_lower_case def __getstate__( self ): _lowerCamelCase = self.__dict__.copy() _lowerCamelCase = BertPreTokenizer() return state def __setstate__( self , a__ ): _lowerCamelCase = d _lowerCamelCase = self.__dict__['_tokenizer'].get_vocab() _lowerCamelCase = PreTokenizer.custom(JiebaPreTokenizer(a__ ) ) def snake_case_ ( self , a__ , a__=None ): _lowerCamelCase = [self.cls_token_id] + 
token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case_ ( self , a__ , a__ = None ): _lowerCamelCase = [self.sep_token_id] _lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case_ ( self , a__ , a__ = None ): _lowerCamelCase = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ ) def snake_case_ ( self , a__ , a__=None , a__=None , a__=False , **a__ , ): _lowerCamelCase = BertPreTokenizer() return super().save_pretrained(a__ , a__ , a__ , a__ , **a__ )
80
0
from string import ascii_uppercase

a ={str(ord(c) - 55): c for c in ascii_uppercase}

def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        raise TypeError('int() can\'t convert non-string with explicit base' )
    if num < 0:
        raise ValueError('parameter must be positive int' )
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if base in (0, 1):
        raise ValueError('base must be >= 2' )
    if base > 3_6:
        raise ValueError('base must be <= 36' )
    __lowerCamelCase : List[str] = ''
    __lowerCamelCase : List[Any] = 0
    __lowerCamelCase : Optional[int] = 0
    while div != 1:
        __lowerCamelCase , __lowerCamelCase : List[Any] = divmod(lowerCamelCase__ , lowerCamelCase__ )
        if base >= 1_1 and 9 < mod < 3_6:
            __lowerCamelCase : Optional[int] = ALPHABET_VALUES[str(lowerCamelCase__ )]
        else:
            __lowerCamelCase : Optional[Any] = str(lowerCamelCase__ )
        new_value += actual_value
        __lowerCamelCase : Union[str, Any] = num // base
        __lowerCamelCase : Union[str, Any] = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(lowerCamelCase__ )
            return str(new_value[::-1] )
    return new_value[::-1]

if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
73
from __future__ import annotations

from collections.abc import Callable

__UpperCAmelCase = list[list[float | int]]

def A__ ( __lowerCamelCase, __lowerCamelCase ):
    SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = [[0 for _ in range(size + 1 )] for _ in range(__lowerCamelCase )]
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42

    for row in range(__lowerCamelCase ):
        for col in range(__lowerCamelCase ):
            SCREAMING_SNAKE_CASE_ = matrix[row][col]
        SCREAMING_SNAKE_CASE_ = vector[row][0]

    SCREAMING_SNAKE_CASE_ = 0
    SCREAMING_SNAKE_CASE_ = 0
    while row < size and col < size:
        # pivoting
        SCREAMING_SNAKE_CASE_ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCamelCase, __lowerCamelCase ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, __lowerCamelCase ):
            SCREAMING_SNAKE_CASE_ = augmented[rowa][col] / augmented[row][col]
            SCREAMING_SNAKE_CASE_ = 0
            for cola in range(col + 1, size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, __lowerCamelCase ):
        for row in range(__lowerCamelCase ):
            SCREAMING_SNAKE_CASE_ = augmented[row][col] / augmented[col][col]
            for cola in range(__lowerCamelCase, size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10 )] for row in range(__lowerCamelCase )
    ]

def A__ ( __lowerCamelCase ):
    SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = [[0 for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
    SCREAMING_SNAKE_CASE_ = [[0] for _ in range(__lowerCamelCase )]
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42

    for x_val, y_val in enumerate(__lowerCamelCase ):
        for col in range(__lowerCamelCase ):
            SCREAMING_SNAKE_CASE_ = (x_val + 1) ** (size - col - 1)
        SCREAMING_SNAKE_CASE_ = y_val

    SCREAMING_SNAKE_CASE_ = solve(__lowerCamelCase, __lowerCamelCase )

    def interpolated_func(__lowerCamelCase ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__lowerCamelCase )
        )

    return interpolated_func

def A__ ( __lowerCamelCase ):
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )

def A__ ( __lowerCamelCase = question_function, __lowerCamelCase = 10 ):
    SCREAMING_SNAKE_CASE_ = [func(__lowerCamelCase ) for x_val in range(1, order + 1 )]
    SCREAMING_SNAKE_CASE_ = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
    ]
    SCREAMING_SNAKE_CASE_ = 0
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42

    for poly in polynomials:
        SCREAMING_SNAKE_CASE_ = 1
        while func(__lowerCamelCase ) == poly(__lowerCamelCase ):
            x_val += 1
        ret += poly(__lowerCamelCase )

    return ret

if __name__ == "__main__":
    print(F"""{solution() = }""")
299
0
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Any = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self: str , _lowerCAmelCase: List[Any] = None , _lowerCAmelCase: List[str] = None , _lowerCAmelCase: int=None , _lowerCAmelCase: Optional[int]=None ): if not conversation_id: lowercase :Optional[int] = uuid.uuida() if past_user_inputs is None: lowercase :List[str] = [] if generated_responses is None: lowercase :Optional[int] = [] lowercase :Optional[int] = conversation_id lowercase :List[Any] = past_user_inputs lowercase :Tuple = generated_responses lowercase :Union[str, Any] = text def __eq__( self: Tuple , _lowerCAmelCase: int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Any = False ): if self.new_user_input: if overwrite: logger.warning( F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten " F"with: \"{text}\"." ) lowercase :Dict = text else: logger.warning( F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input " F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" ) else: lowercase :Optional[Any] = text def SCREAMING_SNAKE_CASE ( self: Any ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowercase :Tuple = None def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: str ): self.generated_responses.append(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: List[str] ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self: Optional[Any] ): lowercase :Any = F"Conversation id: {self.uuid} \n" for is_user, text in self.iter_texts(): lowercase :Tuple = "user" if is_user else "bot" output += F"{name} >> {text} \n" return output @add_end_docstrings( lowerCAmelCase , r'''\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ''' , ) class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: Union[str, Any] , *_lowerCAmelCase: Dict , **_lowerCAmelCase: int ): super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) if self.tokenizer.pad_token_id is None: lowercase :List[str] = self.tokenizer.eos_token def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: str=None , _lowerCAmelCase: str=None , _lowerCAmelCase: List[Any]=None , **_lowerCAmelCase: List[Any] ): lowercase :List[Any] = {} lowercase :Optional[Any] = {} lowercase :List[str] = {} if min_length_for_response is not None: lowercase :Optional[int] = min_length_for_response if minimum_tokens is not None: lowercase :int = minimum_tokens if "max_length" in generate_kwargs: lowercase :Optional[Any] = generate_kwargs["max_length"] # self.max_length = 
generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowercase :Any = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__lowerCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self: str , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any]=0 , **_lowerCAmelCase: Dict ): lowercase :List[str] = super().__call__(__lowerCAmelCase , num_workers=__lowerCAmelCase , **__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) == 1: return outputs[0] return outputs def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: Any , _lowerCAmelCase: List[Any]=32 ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError("ConversationalPipeline, expects Conversation as inputs" ) if conversation.new_user_input is None: raise ValueError( F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. " "Add user inputs with the conversation's `add_user_input` method" ) if hasattr(self.tokenizer , "_build_conversation_input_ids" ): lowercase :Union[str, Any] = self.tokenizer._build_conversation_input_ids(__lowerCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowercase :Optional[Any] = self._legacy_parse_and_tokenize(__lowerCAmelCase ) if self.framework == "pt": lowercase :Dict = torch.LongTensor([input_ids] ) elif self.framework == "tf": lowercase :Optional[Any] = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: Dict , _lowerCAmelCase: Tuple=10 , **_lowerCAmelCase: Optional[int] ): lowercase :int = generate_kwargs.get("max_length" , self.model.config.max_length ) lowercase :Tuple = model_inputs["input_ids"].shape[1] if max_length - minimum_tokens < n: logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" ) lowercase :Optional[Any] = max_length - minimum_tokens lowercase :Tuple = model_inputs["input_ids"][:, -trim:] if "attention_mask" in model_inputs: lowercase :Optional[Any] = model_inputs["attention_mask"][:, -trim:] lowercase :List[Any] = model_inputs.pop("conversation" ) lowercase :Any = max_length lowercase :List[str] = self.model.generate(**__lowerCAmelCase , **__lowerCAmelCase ) if self.model.config.is_encoder_decoder: lowercase :Tuple = 1 else: lowercase :Optional[Any] = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Optional[int]=True ): lowercase :Tuple = model_outputs["output_ids"] lowercase :Optional[Any] = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ) lowercase :Tuple = model_outputs["conversation"] conversation.mark_processed() conversation.append_response(__lowerCAmelCase ) return conversation def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: List[str] ): lowercase :List[Any] = self.tokenizer.eos_token_id lowercase :Dict = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) ) if len(__lowerCAmelCase ) > 
self.tokenizer.model_max_length: lowercase :int = input_ids[-self.tokenizer.model_max_length :] return input_ids
360
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class __lowerCAmelCase ( lowerCAmelCase): _a = 42 _a = None def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase=0.999, lowerCamelCase="cosine", ): if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCamelCase ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCamelCase ): return math.exp(t * -12.0 ) else: raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" ) lowercase :Optional[int] = [] for i in range(lowerCamelCase ): lowercase :Any = i / num_diffusion_timesteps lowercase :str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCamelCase ) / alpha_bar_fn(lowerCamelCase ), lowerCamelCase ) ) return torch.tensor(lowerCamelCase, dtype=torch.floataa ) class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase): _a = 1 @register_to_config def __init__( self: Any , _lowerCAmelCase: int = 10_00 , _lowerCAmelCase: float = 0.00_01 , _lowerCAmelCase: float = 0.02 , _lowerCAmelCase: str = "linear" , _lowerCAmelCase: Optional[Union[np.ndarray, List[float]]] = None , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = True , _lowerCAmelCase: int = 0 , _lowerCAmelCase: str = "epsilon" , _lowerCAmelCase: float = 1.0 , **_lowerCAmelCase: Union[str, Any] , ): if kwargs.get("set_alpha_to_one" , _lowerCAmelCase ) is not None: lowercase :Optional[int] = ( "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead." ) deprecate("set_alpha_to_one" , "1.0.0" , _lowerCAmelCase , standard_warn=_lowerCAmelCase ) lowercase :str = kwargs["set_alpha_to_one"] if trained_betas is not None: lowercase :int = torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": lowercase :List[Any] = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. lowercase :Tuple = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowercase :Any = betas_for_alpha_bar(_lowerCAmelCase ) else: raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" ) lowercase :Dict = 1.0 - self.betas lowercase :Dict = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
lowercase :Any = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution lowercase :Union[str, Any] = 1.0 # setable values lowercase :str = None lowercase :List[Any] = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) ) def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: Optional[int] = None ): return sample def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: int , _lowerCAmelCase: Union[str, torch.device] = None ): if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" F" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" F" maximal {self.config.num_train_timesteps} timesteps." ) lowercase :List[Any] = num_inference_steps lowercase :Optional[Any] = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase :str = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa ) lowercase :str = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase ) self.timesteps += self.config.steps_offset def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: int , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: float = 0.0 , _lowerCAmelCase: bool = False , _lowerCAmelCase: Optional[torch.FloatTensor] = None , _lowerCAmelCase: bool = True , ): # 1. get previous step value (=t+1) lowercase :int = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process lowercase :List[Any] = self.alphas_cumprod[timestep] lowercase :Dict = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) lowercase :Optional[Any] = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": lowercase :int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 lowercase :Optional[Any] = model_output elif self.config.prediction_type == "sample": lowercase :Union[str, Any] = model_output lowercase :List[str] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": lowercase :Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output lowercase :str = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: lowercase :Optional[Any] = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowercase :List[Any] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowercase :Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def __len__( self: List[str] ): return self.config.num_train_timesteps
158
0
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] ) -> int: if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> List[Any]: for char in word: _snake_case : Optional[Any] = ord(UpperCamelCase__ ) if not _is_chinese_char(UpperCamelCase__ ): return 0 return 1 def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: _snake_case : int = set() for token in tokens: _snake_case : List[str] = len(UpperCamelCase__ ) > 1 and is_chinese(UpperCamelCase__ ) if chinese_word: word_set.add(UpperCamelCase__ ) _snake_case : Union[str, Any] = list(UpperCamelCase__ ) return word_list def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : set() ) -> str: if not chinese_word_set: return bert_tokens _snake_case : List[str] = max([len(UpperCamelCase__ ) for w in chinese_word_set] ) _snake_case : Optional[int] = bert_tokens _snake_case , _snake_case : Optional[int] = 0, len(UpperCamelCase__ ) while start < end: _snake_case : Optional[Any] = True if is_chinese(bert_word[start] ): _snake_case : Dict = min(end - start , UpperCamelCase__ ) for i in range(UpperCamelCase__ , 1 , -1 ): _snake_case : Any = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _snake_case : Dict = """##""" + bert_word[j] _snake_case : Union[str, Any] = start + i _snake_case : Tuple = False break if single_word: start += 1 return bert_word def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : LTP , SCREAMING_SNAKE_CASE__ : BertTokenizer ) -> int: _snake_case : Tuple = [] for i in range(0 , len(UpperCamelCase__ ) , 100 ): _snake_case : Optional[int] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws _snake_case : List[str] = [get_chinese_word(UpperCamelCase__ ) for r in res] ltp_res.extend(UpperCamelCase__ ) assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ) _snake_case : Union[str, Any] = [] for i in range(0 , len(UpperCamelCase__ ) , 100 ): _snake_case : Optional[int] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ) _snake_case : Optional[int] = [] for input_ids, chinese_word in zip(UpperCamelCase__ , UpperCamelCase__ ): _snake_case : Tuple = [] for id in input_ids: _snake_case : List[str] = bert_tokenizer._convert_id_to_token(UpperCamelCase__ ) input_tokens.append(UpperCamelCase__ ) _snake_case : List[str] = add_sub_symbol(UpperCamelCase__ , UpperCamelCase__ ) _snake_case : List[str] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(UpperCamelCase__ ): if token[:2] == "##": _snake_case : List[Any] = token[2:] # save chinese tokens' pos if len(UpperCamelCase__ ) == 1 and _is_chinese_char(ord(UpperCamelCase__ ) ): ref_id.append(UpperCamelCase__ ) ref_ids.append(UpperCamelCase__ ) assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ) return ref_ids def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: _snake_case : Optional[int] = f.readlines() _snake_case : Any = [line.strip() for line in data if len(UpperCamelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _snake_case : int = LTP(args.ltp ) # faster in GPU device _snake_case : Any = BertTokenizer.from_pretrained(args.bert ) _snake_case : Dict = prepare_ref(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: _snake_case : int = [json.dumps(UpperCamelCase__ ) + """\n""" for ref in ref_ids] f.writelines(UpperCamelCase__ ) if __name__ == "__main__": a__ = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", required=False, type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", required=False, type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""", ) parser.add_argument( """--bert""", required=False, type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""", ) parser.add_argument( """--save_path""", required=False, type=str, default="""./resources/ref.txt""", help="""path to save res""", ) a__ = parser.parse_args() main(args)
317
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __A = random.Random() def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=1.0 , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None ) -> Optional[Any]: """simple docstring""" if rng is None: __lowerCamelCase = global_rng __lowerCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=400 , lowerCamelCase__=2_000 , lowerCamelCase__=10 , lowerCamelCase__=160 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_000 , lowerCamelCase__=False , lowerCamelCase__=True , ) -> List[str]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = min_seq_length __lowerCamelCase = max_seq_length __lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCamelCase = padding_value __lowerCamelCase = sampling_rate __lowerCamelCase = return_attention_mask __lowerCamelCase = do_normalize __lowerCamelCase = feature_size __lowerCamelCase = chunk_length __lowerCamelCase = hop_length def lowercase_ ( self ) -> Any: '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase_ ( self , lowerCamelCase__=False , lowerCamelCase__=False ) -> Optional[int]: '''simple docstring''' def _flatten(lowerCamelCase__ ): return list(itertools.chain(*lowerCamelCase__ ) ) if equal_length: __lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCamelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = WhisperFeatureExtractor if is_speech_available() else None def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = WhisperFeatureExtractionTester(self ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0] check_json_file_has_correct_format(lowerCamelCase__ ) __lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ ) __lowerCamelCase = feat_extract_first.to_dict() 
__lowerCamelCase = feat_extract_second.to_dict() __lowerCamelCase = feat_extract_first.mel_filters __lowerCamelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = os.path.join(lowerCamelCase__ , 'feat_extract.json' ) feat_extract_first.to_json_file(lowerCamelCase__ ) __lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ ) __lowerCamelCase = feat_extract_first.to_dict() __lowerCamelCase = feat_extract_second.to_dict() __lowerCamelCase = feat_extract_first.mel_filters __lowerCamelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Dict: '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] # Test feature size __lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='max_length' , return_tensors='np' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features __lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) # Test batched __lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features __lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)] __lowerCamelCase = np.asarray(lowerCamelCase__ ) __lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features __lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) # Test truncation required __lowerCamelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] __lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] __lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs] __lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated] __lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features __lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' import torch __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCamelCase = np.random.rand(100 , 32 ).astype(np.floataa ) __lowerCamelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCamelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __lowerCamelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech __lowerCamelCase = ds.sort('id' ).select(range(lowerCamelCase__ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowercase_ ( self ) -> Tuple: '''simple docstring''' # fmt: off __lowerCamelCase = torch.tensor( [ 0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51, 0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78, 0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54, -0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54 ] ) # fmt: on __lowerCamelCase = self._load_datasamples(1 ) __lowerCamelCase = WhisperFeatureExtractor() __lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='pt' ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase__ , atol=1e-4 ) ) def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCamelCase = self._load_datasamples(1 )[0] __lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue __lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0] self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
90
0
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> list: _lowerCAmelCase =word.split() def justify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: _lowerCAmelCase =max_width - width _lowerCAmelCase =len(__UpperCamelCase ) if len(__UpperCamelCase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _lowerCAmelCase =words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _lowerCAmelCase =spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _lowerCAmelCase =( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(__UpperCamelCase ): num_spaces_between_words_list[i] += 1 _lowerCAmelCase =[] for i in range(__UpperCamelCase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * """ """ ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(__UpperCamelCase ) _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =0 for word in words: if width + len(__UpperCamelCase ) + len(__UpperCamelCase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(__UpperCamelCase ) width += len(__UpperCamelCase ) else: # justify the line and add it to result answer.append(justify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) # reset new line and new width _lowerCAmelCase , _lowerCAmelCase =[word], len(__UpperCamelCase ) _lowerCAmelCase =max_width - width - len(__UpperCamelCase ) answer.append(""" """.join(__UpperCamelCase ) + (remaining_spaces + 1) * """ """ ) return answer if __name__ == "__main__": from doctest import testmod testmod()
370
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
341
0
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor

UpperCAmelCase_ : List[str] = logging.get_logger(__name__)

class _SCREAMING_SNAKE_CASE ( _a ):
    def __init__( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : str ):
        warnings.warn(
            """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ChineseCLIPImageProcessor instead.""" , __lowerCamelCase , )
        super().__init__(*__lowerCamelCase , **__lowerCamelCase )
38
"""simple docstring""" from typing import List import numpy as np def lowercase ( a__ : dict ) -> int: _UpperCamelCase = {key: len(a__ ) for key, value in gen_kwargs.items() if isinstance(a__ , a__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) _UpperCamelCase = max(lists_lengths.values() , default=0 ) return max(1 , a__ ) def lowercase ( a__ : int , a__ : int ) -> List[range]: _UpperCamelCase = [] for group_idx in range(a__ ): _UpperCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _UpperCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _UpperCamelCase = range(a__ , start + num_shards_to_add ) shards_indices_per_group.append(a__ ) return shards_indices_per_group def lowercase ( a__ : dict , a__ : int ) -> List[dict]: _UpperCamelCase = _number_of_shards_in_gen_kwargs(a__ ) if num_shards == 1: return [dict(a__ )] else: _UpperCamelCase = _distribute_shards(num_shards=a__ , max_num_jobs=a__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(a__ , a__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(a__ ) ) ] def lowercase ( a__ : List[dict] ) -> dict: return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , a__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def lowercase ( a__ : np.random.Generator , a__ : dict ) -> dict: _UpperCamelCase = {len(a__ ) for value in gen_kwargs.values() if isinstance(a__ , a__ )} _UpperCamelCase = {} for size in list_sizes: _UpperCamelCase = list(range(a__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _UpperCamelCase = dict(a__ ) for key, value in shuffled_kwargs.items(): if isinstance(a__ , a__ ): _UpperCamelCase = [value[i] for i in indices_per_size[len(a__ )]] return shuffled_kwargs
256
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor snake_case__ : Tuple = logging.get_logger(__name__) class snake_case_( a__ ): def __init__( self : Optional[Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : List[str] ): warnings.warn( '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PoolFormerImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
314
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class snake_case_: def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ): lowerCAmelCase : Tuple = '''bilinear''' lowerCAmelCase : List[Any] = max_size lowerCAmelCase : Optional[int] = short_edge_length def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = [] for img in imgs: lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w else: lowerCAmelCase, lowerCAmelCase : int = scale * h, size if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size: lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = newh * scale lowerCAmelCase : str = neww * scale lowerCAmelCase : Union[str, Any] = int(neww + 0.5 ) lowerCAmelCase : str = int(newh + 0.5 ) if img.dtype == np.uinta: lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowerCAmelCase : Optional[int] = nn.functional.interpolate( UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 ) img_augs.append(UpperCamelCase_ ) return img_augs class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Any ): lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY lowerCAmelCase : int = cfg.PAD_VALUE lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) ) lowerCAmelCase : Dict = [im.shape[-2:] for im in images] lowerCAmelCase : Dict = [ nn.functional.pad( UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ): with torch.no_grad(): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : List[Any] = [images] if single_image: assert len(UpperCamelCase_ ) == 1 for i in range(len(UpperCamelCase_ ) ): if isinstance(images[i] , 
torch.Tensor ): images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] ) lowerCAmelCase : str = self.aug(UpperCamelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images] # now pad them to do the following operations lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _snake_case ( _snake_case : str , _snake_case : List[Any] ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ): assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!" lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size tensor[:, 0].clamp_(min=0 , max=_snake_case ) tensor[:, 1].clamp_(min=0 , max=_snake_case ) tensor[:, 2].clamp_(min=0 , max=_snake_case ) tensor[:, 3].clamp_(min=0 , max=_snake_case )
314
1
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
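A quick round-trip sketch for the character-level tokenizer above (an illustrative assumption about usage inside the transformers package, not part of the original file):

tokenizer = CanineTokenizer()
ids = [tokenizer._convert_token_to_id(c) for c in tokenizer._tokenize("hello")]
assert ids == [ord(c) for c in "hello"]  # each character is its own codepoint id
assert tokenizer.convert_tokens_to_string([tokenizer._convert_id_to_token(i) for i in ids]) == "hello"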
32
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
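A short check of the sparse-step bookkeeping above (illustrative, not part of the original file):

config = SwitchTransformersConfig()      # defaults: 12 layers, 3 sparse layers per stack
assert config.encoder_sparse_step == 4   # every 4th encoder layer is a sparse MoE layer
assert config.decoder_sparse_step == 4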
32
1
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ ='efficientformer' def __init__( self : str , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : Tuple , ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : Any = hidden_sizes UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : Tuple = num_attention_heads UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : List[str] = layer_norm_eps UpperCAmelCase__ : Any = patch_size UpperCAmelCase__ : Any = num_channels UpperCAmelCase__ : Optional[Any] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Tuple = downsamples UpperCAmelCase__ : Optional[Any] = dim UpperCAmelCase__ : List[Any] = key_dim UpperCAmelCase__ : Union[str, Any] = attention_ratio UpperCAmelCase__ : List[str] = resolution UpperCAmelCase__ : Dict = pool_size UpperCAmelCase__ : Tuple = downsample_patch_size UpperCAmelCase__ : Optional[Any] = downsample_stride UpperCAmelCase__ : Union[str, Any] = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[Any] = distillation UpperCAmelCase__ : Tuple = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[int] = image_size UpperCAmelCase__ : Dict = batch_norm_eps
357
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
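For reference, the posterior-mean coefficients computed in the step above correspond to Eq. (7) of Ho et al. (2020) (this note is added for clarity and is not part of the original file):

$$\tilde{\mu}_t(x_t, x_0) = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1 - \bar{\alpha}_t}\, x_0 \;+\; \frac{\sqrt{\alpha_t}\,(1 - \bar{\alpha}_{t-1})}{1 - \bar{\alpha}_t}\, x_t$$

where `pred_original_sample_coeff` is the first fraction, `current_sample_coeff` is the second, $\beta_t = 1 - \alpha_t$, and $\bar{\alpha}_t = \prod_{s=1}^{t} \alpha_s$.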
298
0
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : str = logging.get_logger(__name__) lowerCAmelCase__ : str = '''https://openaipublic.azureedge.net/jukebox/models/''' lowerCAmelCase__ : int = { '''jukebox-1b-lyrics''': [ '''5b/vqvae.pth.tar''', '''5b/prior_level_0.pth.tar''', '''5b/prior_level_1.pth.tar''', '''1b_lyrics/prior_level_2.pth.tar''', ], '''jukebox-5b-lyrics''': [ '''5b/vqvae.pth.tar''', '''5b/prior_level_0.pth.tar''', '''5b/prior_level_1.pth.tar''', '''5b_lyrics/prior_level_2.pth.tar''', ], } def UpperCamelCase__ ( A__ ) -> Any: if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10: snake_case__ : str = key.replace('.model.1.bias' , '.conv1d_1.bias' ) elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10: snake_case__ : Optional[Any] = key.replace('.model.1.weight' , '.conv1d_1.weight' ) elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10: snake_case__ : Dict = key.replace('.model.3.bias' , '.conv1d_2.bias' ) elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10: snake_case__ : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' ) if "conditioner_blocks.0." in key: snake_case__ : Optional[Any] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' ) if "prime_prior" in key: snake_case__ : Dict = key.replace('prime_prior' , 'encoder' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: snake_case__ : Optional[Any] = key.replace('.emb.' , '.' ) if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('.k' , '.codebook' ) if "y_emb." in key: return key.replace('y_emb.' , 'metadata_embedding.' ) if "x_emb.emb." 
in key: snake_case__ : Optional[Any] = key.replace('0.x_emb.emb' , 'embed_tokens' ) if "prime_state_ln" in key: return key.replace('prime_state_ln' , 'encoder.final_layer_norm' ) if ".ln" in key: return key.replace('.ln' , '.layer_norm' ) if "_ln" in key: return key.replace('_ln' , '_layer_norm' ) if "prime_state_proj" in key: return key.replace('prime_state_proj' , 'encoder.proj_in' ) if "prime_x_out" in key: return key.replace('prime_x_out' , 'encoder.lm_head' ) if "prior.x_out" in key: return key.replace('x_out' , 'fc_proj_out' ) if "x_emb" in key: return key.replace('x_emb' , 'embed_tokens' ) return key def UpperCamelCase__ ( A__ , A__ , A__ , A__ ) -> Tuple: snake_case__ : Optional[Any] = {} import re snake_case__ : Tuple = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) snake_case__ : Tuple = re.compile( r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) snake_case__ : int = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) snake_case__ : Optional[int] = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) snake_case__ : Optional[Any] = re.compile( r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) snake_case__ : Any = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) snake_case__ : str = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' ) snake_case__ : Optional[Any] = re.compile( r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) snake_case__ : str = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(A__ ): snake_case__ : Dict = re_encoder_block_conv_in.match(A__ ) snake_case__ : int = regex_match.groups() snake_case__ : Tuple = int(groups[2] ) * 2 + int(groups[3] ) snake_case__ : int = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}""" snake_case__ : List[str] = re_encoder_block_conv_in.sub(A__ , A__ ) elif re_encoder_block_resnet.fullmatch(A__ ): snake_case__ : List[Any] = re_encoder_block_resnet.match(A__ ) snake_case__ : Any = regex_match.groups() snake_case__ : int = int(groups[2] ) * 2 + int(groups[3] ) snake_case__ : Any = {'1': 1, '3': 2}[groups[-2]] snake_case__ : Optional[int] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.""" snake_case__ : Dict = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" snake_case__ : str = prefix + resnet_block snake_case__ : str = re_encoder_block_resnet.sub(A__ , A__ ) elif re_encoder_block_proj_out.fullmatch(A__ ): snake_case__ : Union[str, Any] = re_encoder_block_proj_out.match(A__ ) snake_case__ : Any = regex_match.groups() snake_case__ : Dict = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}""" snake_case__ : Optional[int] = re_encoder_block_proj_out.sub(A__ , A__ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(A__ ): snake_case__ : str = re_decoder_block_conv_out.match(A__ ) snake_case__ : Union[str, Any] = regex_match.groups() snake_case__ : Optional[int] = int(groups[2] ) * 2 + int(groups[3] ) - 2 snake_case__ : Tuple = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}""" snake_case__ : int = re_decoder_block_conv_out.sub(A__ , A__ ) elif 
re_decoder_block_resnet.fullmatch(A__ ): snake_case__ : Dict = re_decoder_block_resnet.match(A__ ) snake_case__ : Optional[Any] = regex_match.groups() snake_case__ : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2 snake_case__ : Optional[Any] = {'1': 1, '3': 2}[groups[-2]] snake_case__ : Dict = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.""" snake_case__ : Optional[int] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" snake_case__ : Tuple = prefix + resnet_block snake_case__ : List[str] = re_decoder_block_resnet.sub(A__ , A__ ) elif re_decoder_block_proj_in.fullmatch(A__ ): snake_case__ : List[str] = re_decoder_block_proj_in.match(A__ ) snake_case__ : str = regex_match.groups() snake_case__ : Tuple = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}""" snake_case__ : Optional[int] = re_decoder_block_proj_in.sub(A__ , A__ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(A__ ): snake_case__ : List[Any] = re_prior_cond_conv_out.match(A__ ) snake_case__ : Dict = regex_match.groups() snake_case__ : Optional[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 snake_case__ : List[Any] = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}""" snake_case__ : int = re_prior_cond_conv_out.sub(A__ , A__ ) elif re_prior_cond_resnet.fullmatch(A__ ): snake_case__ : Optional[int] = re_prior_cond_resnet.match(A__ ) snake_case__ : Union[str, Any] = regex_match.groups() snake_case__ : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 snake_case__ : int = {'1': 1, '3': 2}[groups[-2]] snake_case__ : Optional[int] = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.""" snake_case__ : List[str] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" snake_case__ : int = prefix + resnet_block snake_case__ : List[Any] = re_prior_cond_resnet.sub(A__ , A__ ) elif re_prior_cond_proj_in.fullmatch(A__ ): snake_case__ : Optional[int] = re_prior_cond_proj_in.match(A__ ) snake_case__ : Optional[int] = regex_match.groups() snake_case__ : Any = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}""" snake_case__ : Optional[int] = re_prior_cond_proj_in.sub(A__ , A__ ) # keep original key else: snake_case__ : Any = original_key snake_case__ : int = replace_key(A__ ) if F"""{key_prefix}.{key}""" not in model_state_dict or key is None: print(F"""failed converting {original_key} to {key}, does not match""" ) # handle missmatched shape elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape: snake_case__ : List[str] = model_state_dict[F"""{key_prefix}.{key}"""] print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" ) snake_case__ : Union[str, Any] = original_key snake_case__ : Tuple = original_key snake_case__ : Dict = value return new_dict @torch.no_grad() def UpperCamelCase__ ( A__=None , A__=None ) -> Any: for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ): snake_case__ : Optional[Any] = requests.get(F"""{PREFIX}{file}""" , allow_redirects=A__ ) os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=A__ ) open(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content ) snake_case__ : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]] snake_case__ : int = JukeboxConfig.from_pretrained(A__ ) snake_case__ : Optional[int] = JukeboxModel(A__ ) snake_case__ : Dict = [] snake_case__ : Dict = {} 
for i, dict_name in enumerate(A__ ): snake_case__ : Dict = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model'] snake_case__ : Optional[Any] = {} for k in old_dic.keys(): if k.endswith('.b' ): snake_case__ : Union[str, Any] = old_dic[k] elif k.endswith('.w' ): snake_case__ : Optional[Any] = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: snake_case__ : Union[str, Any] = old_dic[k] else: snake_case__ : Union[str, Any] = old_dic[k] snake_case__ : str = 'vqvae' if i == 0 else F"""priors.{3 - i}""" snake_case__ : Optional[int] = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__ ) weight_dict.append(A__ ) snake_case__ : Optional[int] = weight_dict.pop(0 ) model.vqvae.load_state_dict(A__ ) for i in range(len(A__ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(A__ ).mkdir(exist_ok=A__ ) with open(F"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile: json.dump(A__ , A__ ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) return weight_dict if __name__ == "__main__": lowerCAmelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''jukebox-5b-lyrics''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''jukebox-5b-lyrics-converted''', type=str, help='''Path to the output PyTorch model directory.''', ) lowerCAmelCase__ : Union[str, Any] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
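A usage sketch built from the argparse defaults above; the script filename is an assumption (the converter's location in the repository is not stated in this record):

python convert_jukebox.py \
    --model_name jukebox-5b-lyrics \
    --pytorch_dump_folder_path jukebox-5b-lyrics-converted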
143
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): @register_to_config def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ): super().__init__() __lowerCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" __lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ ) else: __lowerCamelCase = None __lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ ) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : VQModel UpperCAmelCase__ : CLIPTextModel UpperCAmelCase__ : CLIPTokenizer UpperCAmelCase__ : TransformeraDModel UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings UpperCAmelCase__ : VQDiffusionScheduler def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ): super().__init__() self.register_modules( vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ): __lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1 # get prompt text embeddings __lowerCamelCase = self.tokenizer( UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __lowerCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) __lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] __lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 __lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ ) # duplicate text embeddings for each generation per prompt __lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: __lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings __lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 ) else: __lowerCamelCase = [""""""] * batch_size __lowerCamelCase = text_input_ids.shape[-1] __lowerCamelCase = self.tokenizer( UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , ) __lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings __lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __lowerCamelCase = negative_prompt_embeds.shape[1] __lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 ) __lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = 1 elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = len(UpperCamelCase_ ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' ) __lowerCamelCase = batch_size * num_images_per_prompt __lowerCamelCase = guidance_scale > 1.0 __lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(UpperCamelCase_ )}.' ) # get the initial completely masked latents unless the user supplied it __lowerCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: __lowerCamelCase = self.transformer.num_vector_embeds - 1 __lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( """Unexpected latents value(s). 
All latents be valid embedding indices i.e. in the range 0,""" F' {self.transformer.num_vector_embeds - 1} (inclusive).' ) __lowerCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device ) __lowerCamelCase = self.scheduler.timesteps.to(self.device ) __lowerCamelCase = latents for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the sample if we are doing classifier free guidance __lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` __lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample if do_classifier_free_guidance: __lowerCamelCase, __lowerCamelCase = model_output.chunk(2 ) __lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ ) __lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) # remove `log(0)`'s (`-inf`s) __lowerCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = self.vqvae.config.vq_embed_dim __lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) __lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ ) __lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample __lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ): __lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ ) __lowerCamelCase = torch.exp(UpperCamelCase_ ) __lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out __lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ ) __lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) __lowerCamelCase = keep_mask[:, :-1, :] __lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) __lowerCamelCase = log_p_x_0.clone() __lowerCamelCase = -torch.inf # -inf = log(0) return rv
12
0
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
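A minimal usage sketch (assumes the class above is importable and the hub checkpoint named in the map is reachable):

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello world").input_ids
assert tokenizer.decode(ids) == "Hello world"  # plain ASCII text round-trips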
369
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __magic_name__ ( unittest.TestCase ): def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=18 , _lowercase=30 , _lowercase=400 , _lowercase=True , _lowercase=None , _lowercase=True , )-> Optional[int]: UpperCamelCase_ = size if size is not None else {"height": 18, "width": 18} UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = num_channels UpperCamelCase_ = image_size UpperCamelCase_ = min_resolution UpperCamelCase_ = max_resolution UpperCamelCase_ = do_resize UpperCamelCase_ = size UpperCamelCase_ = apply_ocr def UpperCAmelCase_ ( self )-> str: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __magic_name__ ( snake_case , unittest.TestCase ): UpperCamelCase_ :Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def UpperCAmelCase_ ( self )-> Any: UpperCamelCase_ = LayoutLMvaImageProcessingTester(self ) @property def UpperCAmelCase_ ( self )-> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self )-> Dict: UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowercase , "do_resize" ) ) self.assertTrue(hasattr(_lowercase , "size" ) ) self.assertTrue(hasattr(_lowercase , "apply_ocr" ) ) def UpperCAmelCase_ ( self )-> List[Any]: UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def UpperCAmelCase_ ( self )-> Any: pass def UpperCAmelCase_ ( self )-> List[str]: # Initialize image_processing UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , Image.Image ) # Test not batched input UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , _lowercase ) self.assertIsInstance(encoding.boxes , _lowercase ) # Test batched UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def UpperCAmelCase_ ( self )-> str: # Initialize image_processing UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase ) for image in 
image_inputs: self.assertIsInstance(_lowercase , np.ndarray ) # Test not batched input UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def UpperCAmelCase_ ( self )-> List[str]: # Initialize image_processing UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , torch.Tensor ) # Test not batched input UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def UpperCAmelCase_ ( self )-> Any: # with apply_OCR = True UpperCamelCase_ = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCamelCase_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) UpperCamelCase_ = Image.open(ds[0]["file"] ).convert("RGB" ) UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCamelCase_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", 
"Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 UpperCamelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 
652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , _lowercase ) self.assertListEqual(encoding.boxes , _lowercase ) # with apply_OCR = False UpperCamelCase_ = LayoutLMvaImageProcessor(apply_ocr=_lowercase ) UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
60
0
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: # save results if os.path.exists(UpperCAmelCase ): if os.path.exists(os.path.join(UpperCAmelCase , 'config.json' ) ) and os.path.isfile( os.path.join(UpperCAmelCase , 'config.json' ) ): os.remove(os.path.join(UpperCAmelCase , 'config.json' ) ) if os.path.exists(os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ): os.remove(os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ) else: os.makedirs(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]: snake_case_ = 2 if unlogit: snake_case_ = torch.pow(UpperCAmelCase , UpperCAmelCase ) snake_case_ = p * torch.log(UpperCAmelCase ) snake_case_ = 0 return -plogp.sum(dim=-1 ) def UpperCAmelCase ( UpperCAmelCase ) -> List[str]: logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(UpperCAmelCase ) ) ) ) for row in range(len(UpperCAmelCase ) ): if tensor.dtype != torch.long: logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) ) else: logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False ) -> Union[str, Any]: snake_case_ , snake_case_ = model.config.num_hidden_layers, model.config.num_attention_heads snake_case_ = torch.zeros(UpperCAmelCase , UpperCAmelCase ).to(args.device ) snake_case_ = torch.zeros(UpperCAmelCase , UpperCAmelCase ).to(args.device ) if head_mask is None: snake_case_ = torch.ones(UpperCAmelCase , UpperCAmelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=UpperCAmelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: snake_case_ = None snake_case_ = 0.0 snake_case_ = 0.0 for step, inputs in enumerate(tqdm(UpperCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): snake_case_ = tuple(t.to(args.device ) for t in inputs ) ((snake_case_) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) snake_case_ = model(UpperCAmelCase , labels=UpperCAmelCase , head_mask=UpperCAmelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) snake_case_ , snake_case_ , snake_case_ = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(UpperCAmelCase ): snake_case_ = entropy(attn.detach() , UpperCAmelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(UpperCAmelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: snake_case_ = 2 
snake_case_ = torch.pow(torch.pow(UpperCAmelCase , UpperCAmelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: snake_case_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(UpperCAmelCase ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(UpperCAmelCase ) logger.info('Head ranked by importance scores' ) snake_case_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) snake_case_ = torch.arange( head_importance.numel() , device=args.device ) snake_case_ = head_ranks.view_as(UpperCAmelCase ) print_ad_tensor(UpperCAmelCase ) return attn_entropy, head_importance, total_loss def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ , snake_case_ , snake_case_ = compute_heads_importance(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase ) snake_case_ = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , UpperCAmelCase , original_score * args.masking_threshold ) snake_case_ = torch.ones_like(UpperCAmelCase ) snake_case_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) snake_case_ = original_score while current_score >= original_score * args.masking_threshold: snake_case_ = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads snake_case_ = float('Inf' ) snake_case_ = head_importance.view(-1 ).sort()[1] if len(UpperCAmelCase ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads snake_case_ = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) snake_case_ = new_head_mask.view(-1 ) snake_case_ = 0.0 snake_case_ = new_head_mask.view_as(UpperCAmelCase ) snake_case_ = new_head_mask.clone().detach() print_ad_tensor(UpperCAmelCase ) # Compute metric and head importance again snake_case_ , snake_case_ , snake_case_ = compute_heads_importance( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , head_mask=UpperCAmelCase ) snake_case_ = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('Final head mask' ) print_ad_tensor(UpperCAmelCase ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: snake_case_ = datetime.now() snake_case_ , snake_case_ , snake_case_ = compute_heads_importance( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , compute_importance=UpperCAmelCase , head_mask=UpperCAmelCase ) snake_case_ = 1 / loss snake_case_ = datetime.now() - before_time snake_case_ = sum(p.numel() for p in model.parameters() ) snake_case_ = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(UpperCAmelCase , UpperCAmelCase ): snake_case_ = [ v, ] assert sum(len(UpperCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(UpperCAmelCase ) 
snake_case_ = sum(p.numel() for p in model.parameters() ) snake_case_ = datetime.now() snake_case_ , snake_case_ , snake_case_ = compute_heads_importance( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , compute_importance=UpperCAmelCase , head_mask=UpperCAmelCase , actually_pruned=UpperCAmelCase , ) snake_case_ = 1 / loss snake_case_ = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCAmelCase , UpperCAmelCase , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCAmelCase , UpperCAmelCase ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(UpperCAmelCase , args.output_dir ) def UpperCAmelCase ( ) -> List[str]: snake_case_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=UpperCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=UpperCAmelCase , type=UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=UpperCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=UpperCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=UpperCAmelCase , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=UpperCAmelCase , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=UpperCAmelCase , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' 
) , ) parser.add_argument('--batch_size' , default=1 , type=UpperCAmelCase , help='Batch size.' ) parser.add_argument('--seed' , type=UpperCAmelCase , default=42 ) parser.add_argument('--local_rank' , type=UpperCAmelCase , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=UpperCAmelCase , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=UpperCAmelCase , default='' , help='Can be used for distant debugging.' ) snake_case_ = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: snake_case_ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) snake_case_ = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) snake_case_ = torch.device('cuda' , args.local_rank ) snake_case_ = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) snake_case_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: snake_case_ = nn.parallel.DistributedDataParallel( UpperCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase ) elif args.n_gpu > 1: snake_case_ = nn.DataParallel(UpperCAmelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=UpperCAmelCase ) torch.save(UpperCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Prepare dataset snake_case_ = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) snake_case_ = (torch.from_numpy(UpperCAmelCase ),) snake_case_ = TensorDataset(*UpperCAmelCase ) snake_case_ = RandomSampler(UpperCAmelCase ) snake_case_ = DataLoader(UpperCAmelCase , sampler=UpperCAmelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: snake_case_ = mask_heads(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) prune_heads(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if __name__ == "__main__": main()
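# A minimal invocation sketch for the script above. The script filename and data path are
# hypothetical; the flags match the argparse definitions in main(). `--data_dir` must point
# at a text file of token ids loadable by `np.loadtxt`, since it is fed directly into a
# `TensorDataset`.
#
#   python run_prunability_gpt2.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruned_gpt2 \
#       --try_masking \
#       --masking_threshold 0.9 \
#       --masking_amount 0.1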
69
'''simple docstring'''

import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any causal language model."""

    # Prefix text to help Transformer-XL and XLNet with short prompts
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(
                    prefix=prefix, **self._forward_params
                )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
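# A minimal usage sketch for the pipeline above. The "gpt2" checkpoint is an assumption;
# any causal LM checkpoint works. `return_full_text=False` resolves to ReturnType.NEW_TEXT
# in `_sanitize_parameters`, so only the newly generated continuation is returned.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator("Once upon a time", max_new_tokens=20, return_full_text=False)
print(outputs[0]["generated_text"])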
80
0
"""simple docstring""" import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: A__ = set() A__ = [] def parse_line(lowercase_ ): for line in fp: if isinstance(lowercase_ , lowercase_ ): A__ = line.decode("UTF-8" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" " ): # process a single warning and move it to `selected_warnings`. if len(lowercase_ ) > 0: A__ = "\n".join(lowercase_ ) # Only keep the warnings specified in `targets` if any(f""": {x}: """ in warning for x in targets ): selected_warnings.add(lowercase_ ) buffer.clear() continue else: A__ = line.strip() buffer.append(lowercase_ ) if from_gh: for filename in os.listdir(lowercase_ ): A__ = os.path.join(lowercase_ , lowercase_ ) if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with open(lowercase_ ) as fp: parse_line(lowercase_ ) else: try: with zipfile.ZipFile(lowercase_ ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with z.open(lowercase_ ) as fp: parse_line(lowercase_ ) except Exception: logger.warning( f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[str]: A__ = set() A__ = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith(".zip" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) ) return selected_warnings if __name__ == "__main__": def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: return values.split("," ) SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") # optional parameters parser.add_argument( "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str, help="Comma-separated list of target warning(s) which we want to extract.", ) parser.add_argument( "--from_gh", action="store_true", help="If running from a GitHub action workflow and collecting warnings from its artifacts.", ) SCREAMING_SNAKE_CASE = parser.parse_args() SCREAMING_SNAKE_CASE = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("=" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts SCREAMING_SNAKE_CASE = extract_warnings(args.output_dir, args.targets) SCREAMING_SNAKE_CASE = 
sorted(selected_warnings) with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
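# An invocation sketch for the warnings extractor above. The run id and token are
# placeholders; `get_ci_error_statistics` must be importable from the working directory.
#
#   python extract_warnings.py \
#       --workflow_run_id 123456789 \
#       --output_dir ./ci_artifacts \
#       --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning
#
# The sorted warnings are written to ./ci_artifacts/selected_warnings.json.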
230
"""simple docstring""" import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any: if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: A__ = TOKENIZER_CLASSES else: A__ = {tokenizer_name: getattr(lowercase_ , tokenizer_name + "Fast" )} logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: A__ = TOKENIZER_CLASSES[tokenizer_name] A__ = True if checkpoint_name is None: A__ = list(tokenizer_class.max_model_input_sizes.keys() ) else: A__ = [checkpoint_name] logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer A__ = tokenizer_class.from_pretrained(lowercase_ , force_download=lowercase_ ) # Save fast tokenizer logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: A__, A__ = checkpoint.split("/" ) A__ = os.path.join(lowercase_ , lowercase_ ) elif add_prefix: A__ = checkpoint A__ = dump_path else: A__ = None A__ = dump_path logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: A__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] A__ = file_path.split(lowercase_ )[-1][0] if next_char == "/": A__ = os.path.join(lowercase_ , lowercase_ ) A__ = None logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) A__ = tokenizer.save_pretrained( lowercase_ , legacy_format=lowercase_ , filename_prefix=lowercase_ ) logger.info(f"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith("tokenizer.json" ): os.remove(lowercase_ ) logger.info(f"""=> removing {file_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." ) parser.add_argument( "--tokenizer_name", default=None, type=str, help=( f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ' "download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--checkpoint_name", default=None, type=str, help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.", ) parser.add_argument( "--force_download", action="store_true", help="Re-download checkpoints.", ) SCREAMING_SNAKE_CASE = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
230
1